Whitespace normalization.

parent 752d3f557e
commit 88869f9787
@@ -73,16 +73,16 @@ ConfigParser -- responsible for for parsing a list of
        1, only)

    remove_section(section)
        remove the given file section and all its options

    remove_option(section, option)
        remove the given option from the given section

    set(section, option, value)
        set the given option

    write(fp)
        write the configuration state in .ini format
"""

import sys

@@ -94,7 +94,7 @@ DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10



# exception classes
class Error(Exception):
    def __init__(self, msg=''):

@@ -166,7 +166,7 @@ class MissingSectionHeaderError(ParsingError):
        self.line = line


class ConfigParser:
    def __init__(self, defaults=None):
        self.__sections = {}
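The ConfigParser methods named in the docstring above keep the behavior described there; a minimal usage sketch (the section, option, and file names are made up for illustration):

    import ConfigParser

    cp = ConfigParser.ConfigParser()
    cp.add_section('server')                # hypothetical section name
    cp.set('server', 'port', '8080')        # set the given option
    cp.remove_option('server', 'port')      # remove the given option from the section
    cp.remove_section('server')             # remove the section and all its options
    cp.write(open('example.ini', 'w'))      # write the configuration state in .ini format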
Lib/chunk.py (11 changed lines)
@@ -52,7 +52,7 @@ class Chunk:
    def __init__(self, file, align = 1, bigendian = 1, inclheader = 0):
        import struct
        self.closed = 0
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:

@@ -97,7 +97,7 @@ class Chunk:
        """Seek to specified position into the chunk.
        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"

@@ -121,7 +121,7 @@ class Chunk:
        """Read at most size bytes from the chunk.
        If size is omitted or negative, read until the end
        of the chunk.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"

@@ -130,7 +130,7 @@ class Chunk:
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        if self.size_read == self.chunksize and \

@@ -145,7 +145,7 @@ class Chunk:
        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """

        if self.closed:
            raise ValueError, "I/O operation on closed file"

@@ -165,4 +165,3 @@ class Chunk:
            dummy = self.read(n)
            if not dummy:
                raise EOFError
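A minimal sketch of reading IFF-style chunks with the class above (the file name is made up; this assumes the usual loop of constructing Chunk objects until EOFError):

    import chunk

    f = open('example.aiff', 'rb')          # hypothetical IFF-style input
    while 1:
        try:
            ck = chunk.Chunk(f)             # big-endian, word-aligned by default
        except EOFError:
            break
        data = ck.read()                    # read until the end of the chunk
        ck.skip()                           # leave the file at the start of the next chunk
    f.close()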
@@ -25,19 +25,19 @@ except ImportError,why:
BOM = struct.pack('=H',0xFEFF)
#
BOM_BE = BOM32_BE = '\376\377'
#       corresponds to Unicode U+FEFF in UTF-16 on big endian
#       platforms == ZERO WIDTH NO-BREAK SPACE
BOM_LE = BOM32_LE = '\377\376'
#       corresponds to Unicode U+FFFE in UTF-16 on little endian
#       platforms == defined as being an illegal Unicode character

#
# 64-bit Byte Order Marks
#
BOM64_BE = '\000\000\376\377'
#       corresponds to Unicode U+0000FEFF in UCS-4
BOM64_LE = '\377\376\000\000'
#       corresponds to Unicode U+0000FFFE in UCS-4


### Codec base classes (defining the API)
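A small sketch of how the byte order mark constants above can be used to sniff the byte order of UTF-16 data (the sample byte string is made up; Python 2-era byte strings assumed):

    import codecs

    data = '\376\377\000A'                  # hypothetical UTF-16-BE bytes: BOM + 'A'
    if data[:2] == codecs.BOM_BE:
        print 'big-endian byte order mark'
    elif data[:2] == codecs.BOM_LE:
        print 'little-endian byte order mark'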
Lib/copy.py (356 changed lines)
@@ -2,10 +2,10 @@

Interface summary:

        import copy

        x = copy.copy(y)        # make a shallow copy of y
        x = copy.deepcopy(y)    # make a deep copy of y

For module specific errors, copy.error is raised.

@@ -53,8 +53,8 @@ __getstate__() and __setstate__(). See the documentation for module
import types

class Error(Exception):
    pass
error = Error   # backward compatibility

try:
    from org.python.core import PyStringMap

@@ -62,28 +62,28 @@ except ImportError:
    PyStringMap = None

def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """

    try:
        copierfunction = _copy_dispatch[type(x)]
    except KeyError:
        try:
            copier = x.__copy__
        except AttributeError:
            raise error, \
                  "un(shallow)copyable object of type %s" % type(x)
        y = copier()
    else:
        y = copierfunction(x)
    return y

_copy_dispatch = d = {}

def _copy_atomic(x):
    return x
d[types.NoneType] = _copy_atomic
d[types.IntType] = _copy_atomic
d[types.LongType] = _copy_atomic

@@ -91,78 +91,78 @@ d[types.FloatType] = _copy_atomic
d[types.StringType] = _copy_atomic
d[types.UnicodeType] = _copy_atomic
try:
    d[types.CodeType] = _copy_atomic
except AttributeError:
    pass
d[types.TypeType] = _copy_atomic
d[types.XRangeType] = _copy_atomic
d[types.ClassType] = _copy_atomic

def _copy_list(x):
    return x[:]
d[types.ListType] = _copy_list

def _copy_tuple(x):
    return x[:]
d[types.TupleType] = _copy_tuple

def _copy_dict(x):
    return x.copy()
d[types.DictionaryType] = _copy_dict
if PyStringMap is not None:
    d[PyStringMap] = _copy_dict

def _copy_inst(x):
    if hasattr(x, '__copy__'):
        return x.__copy__()
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        y = apply(x.__class__, args)
    else:
        y = _EmptyClass()
        y.__class__ = x.__class__
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
    else:
        state = x.__dict__
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _copy_inst

del d

def deepcopy(x, memo = None):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """

    if memo is None:
        memo = {}
    d = id(x)
    if memo.has_key(d):
        return memo[d]
    try:
        copierfunction = _deepcopy_dispatch[type(x)]
    except KeyError:
        try:
            copier = x.__deepcopy__
        except AttributeError:
            raise error, \
                  "un-deep-copyable object of type %s" % type(x)
        y = copier(memo)
    else:
        y = copierfunction(x, memo)
    memo[d] = y
    return y

_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    return x
d[types.NoneType] = _deepcopy_atomic
d[types.IntType] = _deepcopy_atomic
d[types.LongType] = _deepcopy_atomic

@@ -174,81 +174,81 @@ d[types.TypeType] = _deepcopy_atomic
d[types.XRangeType] = _deepcopy_atomic

def _deepcopy_list(x, memo):
    y = []
    memo[id(x)] = y
    for a in x:
        y.append(deepcopy(a, memo))
    return y
d[types.ListType] = _deepcopy_list

def _deepcopy_tuple(x, memo):
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    d = id(x)
    try:
        return memo[d]
    except KeyError:
        pass
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    memo[d] = y
    return y
d[types.TupleType] = _deepcopy_tuple

def _deepcopy_dict(x, memo):
    y = {}
    memo[id(x)] = y
    for key in x.keys():
        y[deepcopy(key, memo)] = deepcopy(x[key], memo)
    return y
d[types.DictionaryType] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict

def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Because we remember objects by their id, we have
    to assure that possibly temporary objects are kept
    alive by referencing them.
    We store a reference at the id of the memo, which should
    normally not be used unless someone tries to deepcopy
    the memo itself...
    """
    try:
        memo[id(memo)].append(x)
    except KeyError:
        # aha, this is the first one :-)
        memo[id(memo)]=[x]

def _deepcopy_inst(x, memo):
    if hasattr(x, '__deepcopy__'):
        return x.__deepcopy__(memo)
    if hasattr(x, '__getinitargs__'):
        args = x.__getinitargs__()
        _keep_alive(args, memo)
        args = deepcopy(args, memo)
        y = apply(x.__class__, args)
    else:
        y = _EmptyClass()
        y.__class__ = x.__class__
    memo[id(x)] = y
    if hasattr(x, '__getstate__'):
        state = x.__getstate__()
        _keep_alive(state, memo)
    else:
        state = x.__dict__
    state = deepcopy(state, memo)
    if hasattr(y, '__setstate__'):
        y.__setstate__(state)
    else:
        y.__dict__.update(state)
    return y
d[types.InstanceType] = _deepcopy_inst

del d

@@ -260,57 +260,57 @@ class _EmptyClass:
    pass

def _test():
    l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
         {'abc': 'ABC'}, (), [], {}]
    l1 = copy(l)
    print l1==l
    l1 = map(copy, l)
    print l1==l
    l1 = deepcopy(l)
    print l1==l
    class C:
        def __init__(self, arg=None):
            self.a = 1
            self.arg = arg
            if __name__ == '__main__':
                import sys
                file = sys.argv[0]
            else:
                file = __file__
            self.fp = open(file)
            self.fp.close()
        def __getstate__(self):
            return {'a': self.a, 'arg': self.arg}
        def __setstate__(self, state):
            for key in state.keys():
                setattr(self, key, state[key])
        def __deepcopy__(self, memo = None):
            new = self.__class__(deepcopy(self.arg, memo))
            new.a = self.a
            return new
    c = C('argument sketch')
    l.append(c)
    l2 = copy(l)
    print l == l2
    print l
    print l2
    l2 = deepcopy(l)
    print l == l2
    print l
    print l2
    l.append({l[1]: l, 'xyz': l[2]})
    l3 = copy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)
    l3 = deepcopy(l)
    import repr
    print map(repr.repr, l)
    print map(repr.repr, l1)
    print map(repr.repr, l2)
    print map(repr.repr, l3)

if __name__ == '__main__':
    _test()
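As the dispatch code above shows, copy() and deepcopy() fall back to an instance's __copy__ and __deepcopy__ hooks; a minimal sketch of a class that uses them (class and attribute names are made up):

    import copy

    class Node:
        def __init__(self, payload, cache=None):
            self.payload = payload
            self.cache = cache
        def __copy__(self):
            return Node(self.payload, self.cache)           # share the cache
        def __deepcopy__(self, memo):
            return Node(copy.deepcopy(self.payload, memo))  # rebuild payload, drop the cache

    n = Node({'abc': 'ABC'})
    shallow = copy.copy(n)          # dispatches to Node.__copy__
    deep = copy.deepcopy(n)         # dispatches to Node.__deepcopy__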
Lib/dis.py (316 changed lines)
@@ -5,115 +5,115 @@ import string
import types

def dis(x=None):
    """Disassemble classes, methods, functions, or code.

    With no argument, disassemble the last traceback.

    """
    if not x:
        distb()
        return
    if type(x) is types.InstanceType:
        x = x.__class__
    if hasattr(x, '__dict__'):
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if type(x1) in (types.MethodType,
                            types.FunctionType,
                            types.CodeType):
                print "Disassembly of %s:" % name
                try:
                    dis(x1)
                except TypeError, msg:
                    print "Sorry:", msg
                print
    else:
        if hasattr(x, 'im_func'):
            x = x.im_func
        if hasattr(x, 'func_code'):
            x = x.func_code
        if hasattr(x, 'co_code'):
            disassemble(x)
        else:
            raise TypeError, \
                  "don't know how to disassemble %s objects" % \
                  type(x).__name__

def distb(tb=None):
    """Disassemble a traceback (default: last traceback)."""
    if not tb:
        try:
            tb = sys.last_traceback
        except AttributeError:
            raise RuntimeError, "no last traceback to disassemble"
        while tb.tb_next: tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti)

def disassemble(co, lasti=-1):
    """Disassemble a code object."""
    code = co.co_code
    labels = findlabels(code)
    n = len(code)
    i = 0
    extended_arg = 0
    while i < n:
        c = code[i]
        op = ord(c)
        if op == SET_LINENO and i > 0: print # Extra blank line
        if i == lasti: print '-->',
        else: print '   ',
        if i in labels: print '>>',
        else: print '  ',
        print string.rjust(`i`, 4),
        print string.ljust(opname[op], 20),
        i = i+1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i = i+2
            if op == EXTENDED_ARG:
                extended_arg = oparg*65536L
            print string.rjust(`oparg`, 5),
            if op in hasconst:
                print '(' + `co.co_consts[oparg]` + ')',
            elif op in hasname:
                print '(' + co.co_names[oparg] + ')',
            elif op in hasjrel:
                print '(to ' + `i + oparg` + ')',
            elif op in haslocal:
                print '(' + co.co_varnames[oparg] + ')',
            elif op in hascompare:
                print '(' + cmp_op[oparg] + ')',
        print

disco = disassemble                     # XXX For backwards compatibility

def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets.

    """
    labels = []
    n = len(code)
    i = 0
    while i < n:
        c = code[i]
        op = ord(c)
        i = i+1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256
            i = i+2
            label = -1
            if op in hasjrel:
                label = i+oparg
            elif op in hasjabs:
                label = oparg
            if label >= 0:
                if label not in labels:
                    labels.append(label)
    return labels

cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
          'is not', 'exception match', 'BAD')

@@ -129,19 +129,19 @@ opname = [''] * 256
for op in range(256): opname[op] = '<' + `op` + '>'

def def_op(name, op):
    opname[op] = name

def name_op(name, op):
    opname[op] = name
    hasname.append(op)

def jrel_op(name, op):
    opname[op] = name
    hasjrel.append(op)

def jabs_op(name, op):
    opname[op] = name
    hasjabs.append(op)

# Instruction opcodes for compiled code

@@ -219,49 +219,49 @@ def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)

HAVE_ARGUMENT = 90              # Opcodes from here have an argument:

name_op('STORE_NAME', 90)       # Index in name list
name_op('DELETE_NAME', 91)      # ""
def_op('UNPACK_SEQUENCE', 92)   # Number of tuple items

name_op('STORE_ATTR', 95)       # Index in name list
name_op('DELETE_ATTR', 96)      # ""
name_op('STORE_GLOBAL', 97)     # ""
name_op('DELETE_GLOBAL', 98)    # ""
def_op('DUP_TOPX', 99)          # number of items to duplicate
def_op('LOAD_CONST', 100)       # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101)       # Index in name list
def_op('BUILD_TUPLE', 102)      # Number of tuple items
def_op('BUILD_LIST', 103)       # Number of list items
def_op('BUILD_MAP', 104)        # Always zero for now
name_op('LOAD_ATTR', 105)       # Index in name list
def_op('COMPARE_OP', 106)       # Comparison operator
hascompare.append(106)
name_op('IMPORT_NAME', 107)     # Index in name list
name_op('IMPORT_FROM', 108)     # Index in name list

jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip
jrel_op('JUMP_IF_FALSE', 111)   # ""
jrel_op('JUMP_IF_TRUE', 112)    # ""
jabs_op('JUMP_ABSOLUTE', 113)   # Target byte offset from beginning of code
jrel_op('FOR_LOOP', 114)        # Number of bytes to skip

name_op('LOAD_GLOBAL', 116)     # Index in name list

jrel_op('SETUP_LOOP', 120)      # Distance to target address
jrel_op('SETUP_EXCEPT', 121)    # ""
jrel_op('SETUP_FINALLY', 122)   # ""

def_op('LOAD_FAST', 124)        # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125)       # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126)      # Local variable number
haslocal.append(126)

def_op('SET_LINENO', 127)       # Current line number
SET_LINENO = 127

def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)

@@ -277,27 +277,27 @@ def_op('EXTENDED_ARG', 143)
EXTENDED_ARG = 143

def _test():
    """Simple test program to disassemble a file."""
    if sys.argv[1:]:
        if sys.argv[2:]:
            sys.stderr.write("usage: python dis.py [-|file]\n")
            sys.exit(2)
        fn = sys.argv[1]
        if not fn or fn == "-":
            fn = None
    else:
        fn = None
    if not fn:
        f = sys.stdin
    else:
        f = open(fn)
    source = f.read()
    if fn:
        f.close()
    else:
        fn = "<stdin>"
    code = compile(source, fn, "exec")
    dis(code)

if __name__ == "__main__":
    _test()
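A minimal sketch of calling the disassembler above on an ordinary function (the function itself is made up):

    import dis

    def add(a, b):
        return a + b

    dis.dis(add)        # prints offsets, opcode names and arguments, one instruction per line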
Lib/dumbdbm.py (188 changed lines)
@@ -28,117 +28,117 @@ _open = __builtin__.open

_BLOCKSIZE = 512

error = IOError                         # For anydbm

class _Database:

    def __init__(self, file):
        self._dirfile = file + '.dir'
        self._datfile = file + '.dat'
        self._bakfile = file + '.bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w')
            f.close()
        self._update()

    def _update(self):
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            while 1:
                line = f.readline().rstrip()
                if not line: break
                key, (pos, siz) = eval(line)
                self._index[key] = (pos, siz)
            f.close()

    def _commit(self):
        try: _os.unlink(self._bakfile)
        except _os.error: pass
        try: _os.rename(self._dirfile, self._bakfile)
        except _os.error: pass
        f = _open(self._dirfile, 'w')
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __getitem__(self, key):
        pos, siz = self._index[key]     # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat

    def _addval(self, val):
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = int(f.tell())
## Does not work under MW compiler
##              pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
##              f.seek(pos)
        npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
        f.write('\0'*(npos-pos))
        pos = npos

        f.write(val)
        f.close()
        return (pos, len(val))

    def _setval(self, pos, val):
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))

    def _addkey(self, key, (pos, siz)):
        self._index[key] = (pos, siz)
        f = _open(self._dirfile, 'a')
        f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __setitem__(self, key, val):
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if not self._index.has_key(key):
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz

    def __delitem__(self, key):
        del self._index[key]
        self._commit()

    def keys(self):
        return self._index.keys()

    def has_key(self, key):
        return self._index.has_key(key)

    def __len__(self):
        return len(self._index)

    def close(self):
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None


def open(file, flag = None, mode = None):
    # flag, mode arguments are currently ignored
    return _Database(file)
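A minimal usage sketch of the database class above through its open() helper (the file name is made up; keys and values must be strings):

    import dumbdbm

    db = dumbdbm.open('example')    # creates example.dat / example.dir as needed
    db['spam'] = 'eggs'             # stored via _addval(), indexed in the .dir file
    print db['spam']
    del db['spam']                  # rewrites the index via _commit()
    db.close()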
@@ -291,9 +291,9 @@ def cmpfiles(a, b, common, shallow=1, use_statcache=0):

# Compare two files.
# Return:
#       0 for equal
#       1 for different
#       2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, st):
    try:
Lib/fnmatch.py (120 changed lines)
@@ -15,75 +15,75 @@ import re
_cache = {}

def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """

    import os
    name = os.path.normcase(name)
    pat = os.path.normcase(pat)
    return fnmatchcase(name, pat)

def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """

    if not _cache.has_key(pat):
        res = translate(pat)
        _cache[pat] = re.compile(res)
    return _cache[pat].match(name) is not None

def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """

    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j+1
            if j < n and pat[j] == ']':
                j = j+1
            while j < n and pat[j] != ']':
                j = j+1
            if j >= n:
                res = res + '\\['
            else:
                stuff = pat[i:j]
                i = j+1
                if stuff[0] == '!':
                    stuff = '[^' + stuff[1:] + ']'
                elif stuff == '^'*len(stuff):
                    stuff = '\\^'
                else:
                    while stuff[0] == '^':
                        stuff = stuff[1:] + stuff[0]
                    stuff = '[' + stuff + ']'
                res = res + stuff
        else:
            res = res + re.escape(c)
    return res + "$"
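A minimal sketch of the three public entry points above (the file names and patterns are made up):

    import fnmatch

    print fnmatch.fnmatch('DATA.TXT', '*.txt')      # true where the OS case-normalizes names
    print fnmatch.fnmatchcase('DATA.TXT', '*.txt')  # false: no case normalization
    print fnmatch.translate('?[!abc]*.py')          # the regular expression fnmatchcase compiles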
@@ -138,4 +138,3 @@ def test():
            print x, fix(x, digs), sci(x, digs)
    except (EOFError, KeyboardInterrupt):
        pass
Lib/ftplib.py (1156 changed lines)
File diff suppressed because it is too large