Whitespace normalization.
parent f655328483
commit 8ac1495a6a
@@ -37,16 +37,16 @@ except ImportError:
 __all__ = ["StringIO"]
 
 class StringIO:
-    """class StringIO([buffer])
+    """class StringIO([buffer])
 
     When a StringIO object is created, it can be initialized to an existing
     string by passing the string to the constructor. If no string is given,
-    the StringIO will start empty.
+    the StringIO will start empty.
 
     The StringIO object can accept either Unicode or 8-bit strings, but
     mixing the two may take some care. If both are used, 8-bit strings that
     cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
-    a UnicodeError to be raised when getvalue() is called.
+    a UnicodeError to be raised when getvalue() is called.
     """
     def __init__(self, buf = ''):
         # Force self.buf to be a string or unicode
@@ -63,7 +63,7 @@ class StringIO:
         return iter(self.readline, '')
 
     def close(self):
-        """Free the memory buffer.
+        """Free the memory buffer.
         """
         if not self.closed:
             self.closed = 1
@@ -186,7 +186,7 @@ class StringIO:
         but mixing the two may take some care. If both are used, 8-bit
         strings that cannot be interpreted as 7-bit ASCII (that use the
         8th bit) will cause a UnicodeError to be raised when getvalue()
-        is called.
+        is called.
         """
         if self.buflist:
             self.buf += ''.join(self.buflist)

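The getvalue() behaviour described in the docstring above can be illustrated with a short sketch (Python 2-era code; the UnicodeError comes from the implicit ASCII decode that happens when str and unicode fragments are joined):

    from StringIO import StringIO

    sio = StringIO('header: ')   # initialized from an existing 8-bit string
    sio.write('caf\xe9')         # 8-bit data that is not 7-bit ASCII
    sio.write(u' au lait')       # plus a Unicode fragment
    try:
        sio.getvalue()           # joining the fragments forces an ASCII decode
    except UnicodeError:
        print 'getvalue() raised UnicodeError, as documented'
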
@@ -13,7 +13,7 @@ QP = 1 # Quoted-Printable
 BASE64 = 2 # Base64
 
 # In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
-MISC_LEN = 7
+MISC_LEN = 7
 
 DEFAULT_CHARSET = 'us-ascii'
 
@@ -22,11 +22,11 @@ DEFAULT_CHARSET = 'us-ascii'
 # Defaults
 CHARSETS = {
     # input header enc body enc output conv
-    'iso-8859-1': (QP, QP, None),
+    'iso-8859-1': (QP, QP, None),
     'iso-8859-2': (QP, QP, None),
     'us-ascii': (None, None, None),
     'big5': (BASE64, BASE64, None),
-    'gb2312': (BASE64, BASE64, None),
+    'gb2312': (BASE64, BASE64, None),
     'euc-jp': (BASE64, None, 'iso-2022-jp'),
     'shift_jis': (BASE64, None, 'iso-2022-jp'),
     'iso-2022-jp': (BASE64, None, None),
@@ -125,7 +125,7 @@ class Charset:
     converting between character sets, given the availability of the
     applicable codecs. Given a character set, it will do its best to provide
     information on how to use that character set in an email.
-
+
     Certain character sets must be encoded with quoted-printable or base64
     when used in email headers or bodies. Certain character sets must be
     converted outright, and are not allowed in email. Instances of this

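A rough illustration of how the CHARSETS table above is consumed (a sketch against the old email.Charset API of this era; the attribute names header_encoding/body_encoding are assumptions from memory of that API):

    from email.Charset import Charset, QP, BASE64

    latin1 = Charset('iso-8859-1')
    # Per the table, iso-8859-1 headers and bodies default to quoted-printable.
    assert latin1.header_encoding == QP

    big5 = Charset('big5')
    # Big5 cannot be represented readably in QP, so Base64 is used instead.
    assert big5.header_encoding == BASE64
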
@@ -61,7 +61,7 @@ def decode_header(header):
         if not ecre.search(line):
             decoded.append((line, None))
             continue
-
+
         parts = ecre.split(line)
         while parts:
             unenc = parts.pop(0).strip()
@@ -149,14 +149,14 @@ class Header:
         if charset is None:
             charset = self._charset
         self._chunks.append((s, charset))
-
+
     def _split(self, s, charset):
         # Split up a header safely for use with encode_chunks. BAW: this
         # appears to be a private convenience method.
         splittable = charset.to_splittable(s)
         encoded = charset.from_splittable(splittable)
         elen = charset.encoded_header_len(encoded)
-
+
         if elen <= self._maxlinelen:
             return [(encoded, charset)]
         # BAW: should we use encoded?
@@ -185,7 +185,7 @@ class Header:
         Base64 or quoted-printable) header strings. In addition, there is a
         75-character length limit on any given encoded header field, so
         line-wrapping must be performed, even with double-byte character sets.
-
+
         This method will do its best to convert the string to the correct
         character set used in email, and encode and line wrap it safely with
         the appropriate scheme for that character set.

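A small usage sketch for the Header()/decode_header() API touched above (RFC 2047-style headers; the example value is made up, and the exact encoded form shown in the comment is approximate):

    from email.Header import Header, decode_header

    # Encode a non-ASCII header value; the charset bookkeeping and
    # line wrapping described in the docstrings happen inside encode().
    h = Header('Fu\xdfball', 'iso-8859-1')
    print h.encode()                  # e.g. '=?iso-8859-1?q?Fu=DFball?='

    # decode_header() is the inverse: a list of (decoded_string, charset) pairs.
    print decode_header('=?iso-8859-1?q?Fu=DFball?=')
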
@@ -216,7 +216,7 @@ class Message:
     def get_charset(self):
         """Return the Charset object associated with the message's payload."""
         return self._charset
-
+
     #
     # MAPPING INTERFACE (partial)
     #

@@ -88,7 +88,7 @@ def formataddr(pair):
     """The inverse of parseaddr(), this takes a 2-tuple of the form
     (realname, email_address) and returns the string value suitable
     for an RFC 2822 From:, To: or Cc:.
-
+
    If the first element of pair is false, then the second element is
    returned unmodified.
    """

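A quick sketch of the formataddr()/parseaddr() round trip the docstring describes (the address is a placeholder):

    from email.Utils import formataddr, parseaddr

    print formataddr(('Jane Doe', 'jane@example.com'))
    # -> 'Jane Doe <jane@example.com>'
    print formataddr(('', 'jane@example.com'))
    # realname is false, so the address comes back unmodified
    print parseaddr('Jane Doe <jane@example.com>')
    # -> ('Jane Doe', 'jane@example.com')
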
@@ -39,20 +39,20 @@ MISC_LEN = 7
 # Helpers
 def base64_len(s):
     """Return the length of s when it is encoded with base64."""
-    groups_of_3, leftover = divmod(len(s), 3)
-    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    groups_of_3, leftover = divmod(len(s), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
     # Thanks, Tim!
-    n = groups_of_3 * 4
-    if leftover:
-        n += 4
-    return n
+    n = groups_of_3 * 4
+    if leftover:
+        n += 4
+    return n
 
 
 def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
                   eol=NL):
     """Encode a single header line with Base64 encoding in a given charset.
 
     Defined in RFC 2045, this Base64 encoding is identical to normal Base64
     encoding, except that each line must be intelligently wrapped (respecting
     the Base64 encoding), and subsequent lines must start with a space.
@@ -72,7 +72,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
 
     "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
      =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
-
+
     with each line wrapped at, at most, maxlinelen characters (defaults to 76
     characters).
     """
@@ -82,7 +82,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
     if not keep_eols:
         header = fix_eols(header)
-
+
     # Base64 encode each line, in encoded chunks no greater than maxlinelen in
     # length, after the RFC chrome is added in.
     base64ed = []
@@ -91,7 +91,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
     # BAW: Ben's original code used a step of max_unencoded, but I think it
     # ought to be max_encoded. Otherwise, where's max_encoded used? I'm
-    # still not sure what the
+    # still not sure what the
     for i in range(0, len(header), max_unencoded):
         base64ed.append(b2a_base64(header[i:i+max_unencoded]))
 
@@ -126,10 +126,10 @@ def encode(s, binary=1, maxlinelen=76, eol=NL):
     """
     if not s:
         return s
-
+
     if not binary:
         s = fix_eols(s)
-
+
     encvec = []
     max_unencoded = maxlinelen * 3 / 4
     for i in range(0, len(s), max_unencoded):
@@ -162,7 +162,7 @@ def decode(s, convert_eols=None):
     """
     if not s:
         return s
-
+
     dec = a2b_base64(s)
     if convert_eols:
         return dec.replace(CRLF, convert_eols)

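The base64_len() arithmetic above (4 output bytes per 3-byte input group, rounded up) can be checked directly against binascii; a minimal sketch:

    from binascii import b2a_base64

    def base64_len(s):
        groups_of_3, leftover = divmod(len(s), 3)
        n = groups_of_3 * 4
        if leftover:
            n += 4
        return n

    for sample in ('', 'a', 'ab', 'abc', 'abcd', 'hello world'):
        # b2a_base64() appends a newline, so compare against len - 1.
        assert base64_len(sample) == len(b2a_base64(sample)) - 1
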
@@ -22,7 +22,7 @@ in To:/From:/Cc: etc. fields, as well as Subject: lines.
 This module does not do the line wrapping or end-of-line character
 conversion necessary for proper internationalized headers; it only
 does dumb encoding and decoding. To deal with the various line
-wrapping issues, use the email.Header module.
+wrapping issues, use the email.Header module.
 """
 
 import re
@@ -50,7 +50,7 @@ def body_quopri_check(c):
     """Return true if the character should be escaped with body quopri."""
     return bqre.match(c) and 1
-
+
 
 def header_quopri_len(s):
     """Return the length of str when it is encoded with header quopri."""
     count = 0
@@ -131,7 +131,7 @@ def header_encode(header, charset="iso-8859-1", keep_eols=0, maxlinelen=76,
     # lenght, after the RFC chrome is added in.
     quoted = []
     max_encoded = maxlinelen - len(charset) - MISC_LEN
-
+
     for c in header:
         # Space may be represented as _ instead of =20 for readability
         if c == ' ':
@@ -187,7 +187,7 @@ def encode(body, binary=0, maxlinelen=76, eol=NL):
             line = line[:-2]
         elif line[-1] in CRLF:
             line = line[:-1]
-
+
         lineno += 1
         encoded_line = ''
         prev = None

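For the header_encode() budget computed above: maxlinelen minus the charset name minus the 7 characters of "=?", "?q?" and "?=" chrome leaves the room available for encoded payload per wrapped line. A hedged sketch (the output shown in the comment is approximate):

    from email.quopriMIME import header_encode

    charset = 'iso-8859-1'
    MISC_LEN = 7                          # len('=?') + len('?q?') + len('?=')
    max_encoded = 76 - len(charset) - MISC_LEN
    print max_encoded                     # 59 payload characters per line

    # Spaces become '_' and non-ASCII bytes become =XX escapes.
    print header_encode('Fu\xdfball Bundesliga', charset)
    # roughly: '=?iso-8859-1?q?Fu=DFball_Bundesliga?='
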
@@ -94,7 +94,7 @@ def input(files=None, inplace=0, backup="", bufsize=0):
     Create an instance of the FileInput class. The instance will be used
     as global state for the functions of this module, and is also returned
     to use during iteration. The parameters to this function will be passed
-    along to the constructor of the FileInput class.
+    along to the constructor of the FileInput class.
     """
     global _state
     if _state and _state._file:
@@ -118,7 +118,7 @@ def nextfile():
     changed until after the first line of the next file has been read.
     Before the first line has been read, this function has no effect;
     it cannot be used to skip the first file. After the last line of the
-    last file has been read, this function has no effect.
+    last file has been read, this function has no effect.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -127,7 +127,7 @@ def nextfile():
 def filename():
     """
     Return the name of the file currently being read.
-    Before the first line has been read, returns None.
+    Before the first line has been read, returns None.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -137,7 +137,7 @@ def lineno():
     """
     Return the cumulative line number of the line that has just been read.
     Before the first line has been read, returns 0. After the last line
-    of the last file has been read, returns the line number of that line.
+    of the last file has been read, returns the line number of that line.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -147,7 +147,7 @@ def filelineno():
     """
     Return the line number in the current file. Before the first line
     has been read, returns 0. After the last line of the last file has
-    been read, returns the line number of that line within the file.
+    been read, returns the line number of that line within the file.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -156,7 +156,7 @@ def filelineno():
 def isfirstline():
     """
     Returns true the line just read is the first line of its file,
-    otherwise returns false.
+    otherwise returns false.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -165,7 +165,7 @@ def isfirstline():
 def isstdin():
     """
     Returns true if the last line was read from sys.stdin,
-    otherwise returns false.
+    otherwise returns false.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -173,14 +173,14 @@ def isstdin():
 
 class FileInput:
     """class FileInput([files[, inplace[, backup]]])
 
     Class FileInput is the implementation of the module; its methods
     filename(), lineno(), fileline(), isfirstline(), isstdin(), nextfile()
     and close() correspond to the functions of the same name in the module.
     In addition it has a readline() method which returns the next
     input line, and a __getitem__() method which implements the
     sequence behavior. The sequence must be accessed in strictly
-    sequential order; random access and readline() cannot be mixed.
+    sequential order; random access and readline() cannot be mixed.
     """
 
     def __init__(self, files=None, inplace=0, backup="", bufsize=0):

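The module-level functions documented above are meant to be used while iterating over input(); a typical sketch (the file names are placeholders):

    import fileinput

    for line in fileinput.input(['spam.txt', 'eggs.txt']):
        if fileinput.isfirstline():
            print '==>', fileinput.filename()
        print '%d (%d in %s): %s' % (fileinput.lineno(),
                                     fileinput.filelineno(),
                                     fileinput.filename(),
                                     line.rstrip())
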
@@ -238,16 +238,16 @@ def abspath(path):
 
 # realpath is a no-op on systems without islink support
 def realpath(path):
-    path = abspath(path)
-    try:
-        import macfs
-    except ImportError:
-        return path
-    if not path:
-        return path
-    components = path.split(':')
-    path = components[0] + ':'
-    for c in components[1:]:
-        path = join(path, c)
-        path = macfs.ResolveAliasFile(path)[0].as_pathname()
-    return path
+    path = abspath(path)
+    try:
+        import macfs
+    except ImportError:
+        return path
+    if not path:
+        return path
+    components = path.split(':')
+    path = components[0] + ':'
+    for c in components[1:]:
+        path = join(path, c)
+        path = macfs.ResolveAliasFile(path)[0].as_pathname()
+    return path

@@ -865,7 +865,7 @@ class Unpickler:
             import warnings
             warnings.warn("The None return argument form of __reduce__ is "
                           "deprecated. Return a tuple of arguments instead.",
-                          DeprecationWarning)
+                          DeprecationWarning)
             value = callable.__basicnew__()
         else:
             value = apply(callable, arg_tup)

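The DeprecationWarning above concerns __reduce__ returning None; the supported form returns a (callable, args) tuple, roughly as in this sketch:

    import pickle

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __reduce__(self):
            # Return (callable, argument tuple) rather than the deprecated
            # None form, so unpickling simply calls Point(x, y) again.
            return (Point, (self.x, self.y))

    p2 = pickle.loads(pickle.dumps(Point(3, 4)))
    print p2.x, p2.y        # -> 3 4
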
@@ -445,14 +445,14 @@ class Random:
     ## -------------------- gamma distribution --------------------
 
     def gammavariate(self, alpha, beta):
-
+
         # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
-
+
         # Warning: a few older sources define the gamma distribution in terms
         # of alpha > -1.0
         if alpha <= 0.0 or beta <= 0.0:
             raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
-
+
         random = self.random
         if alpha > 1.0:
 
@@ -463,7 +463,7 @@ class Random:
             ainv = _sqrt(2.0 * alpha - 1.0)
             bbb = alpha - LOG4
             ccc = alpha + ainv
-
+
             while 1:
                 u1 = random()
                 u2 = random()
@@ -630,7 +630,7 @@ def _test(N=20000):
     _test_generator(N, 'vonmisesvariate(0.0, 1.0)')
     _test_generator(N, 'gammavariate(0.01, 1.0)')
     _test_generator(N, 'gammavariate(0.1, 1.0)')
-    _test_generator(N, 'gammavariate(0.1, 2.0)')
+    _test_generator(N, 'gammavariate(0.1, 2.0)')
     _test_generator(N, 'gammavariate(0.5, 1.0)')
     _test_generator(N, 'gammavariate(0.9, 1.0)')
     _test_generator(N, 'gammavariate(1.0, 1.0)')

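The constraints stated in the comment above (alpha > 0, beta > 0, mean alpha*beta, variance alpha*beta**2) are easy to sanity-check; a sketch:

    import random

    alpha, beta = 2.0, 3.0
    samples = [random.gammavariate(alpha, beta) for i in range(100000)]
    total = 0.0
    for s in samples:
        total += s
    print total / len(samples)      # should hover near alpha * beta = 6.0

    try:
        random.gammavariate(0.0, 1.0)
    except ValueError:
        print 'alpha <= 0.0 is rejected, as the check above enforces'
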
@@ -2,14 +2,14 @@
 
 """The Tab Nanny despises ambiguous indentation. She knows no mercy.
 
-tabnanny -- Detection of ambiguous indentation
+tabnanny -- Detection of ambiguous indentation
 
 For the time being this module is intended to be called as a script.
 However it is possible to import it into an IDE and use the function
-check() described below.
+check() described below.
 
 Warning: The API provided by this module is likely to change in future
-releases; such changes may not be backward compatible.
+releases; such changes may not be backward compatible.
 """
 
 # Released to the public domain, by Tim Peters, 15 April 1998.
@@ -60,7 +60,7 @@ def main():
 class NannyNag(Exception):
     """
     Raised by tokeneater() if detecting an ambiguous indent.
-    Captured and handled in check().
+    Captured and handled in check().
     """
     def __init__(self, lineno, msg, line):
         self.lineno, self.msg, self.line = lineno, msg, line
@@ -73,14 +73,14 @@ class NannyNag(Exception):
 
 def check(file):
     """check(file_or_dir)
 
     If file_or_dir is a directory and not a symbolic link, then recursively
     descend the directory tree named by file_or_dir, checking all .py files
     along the way. If file_or_dir is an ordinary Python source file, it is
     checked for whitespace related problems. The diagnostic messages are
-    written to standard output using the print statement.
+    written to standard output using the print statement.
     """
-
+
     if os.path.isdir(file) and not os.path.islink(file):
         if verbose:
             print "%s: listing directory" % `file`

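check() above is usually driven from the command line, but per the docstring it can also be called from an IDE; a minimal sketch (the path is a placeholder):

    import tabnanny

    tabnanny.verbose = 1                  # module-level flag consulted by check()
    tabnanny.check('Lib/fileinput.py')    # a single file, or a directory to walk
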
@@ -177,13 +177,13 @@ def run_method_tests(test):
 
     # strip/lstrip/rstrip with unicode arg
     if have_unicode:
-        test('strip', 'xyzzyhelloxyzzy',
+        test('strip', 'xyzzyhelloxyzzy',
              unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
-        test('lstrip', 'xyzzyhelloxyzzy',
+        test('lstrip', 'xyzzyhelloxyzzy',
              unicode('helloxyzzy', 'ascii'), unicode('xyz', 'ascii'))
         test('rstrip', 'xyzzyhelloxyzzy',
              unicode('xyzzyhello', 'ascii'), unicode('xyz', 'ascii'))
-        test('strip', 'hello',
+        test('strip', 'hello',
              unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
 
     test('swapcase', 'HeLLo cOmpUteRs', 'hEllO CoMPuTErS')

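What the test() calls above assert, in plain terms: in this era, stripping an 8-bit str with a unicode argument converts the string to unicode first, so the result comes back as unicode. A sketch:

    assert 'xyzzyhelloxyzzy'.strip(u'xyz') == u'hello'
    assert 'xyzzyhelloxyzzy'.lstrip(u'xyz') == u'helloxyzzy'
    assert 'xyzzyhelloxyzzy'.rstrip(u'xyz') == u'xyzzyhello'
    assert type('hello'.strip(u'xyz')) is unicode
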
@@ -6,20 +6,20 @@ from binascii import Error as binascii_error
 class Base64TestCase(unittest.TestCase):
     def test_encode_string(self):
         """Testing encode string"""
-        test_support.verify(base64.encodestring("www.python.org") ==
-                            "d3d3LnB5dGhvbi5vcmc=\n",
+        test_support.verify(base64.encodestring("www.python.org") ==
+                            "d3d3LnB5dGhvbi5vcmc=\n",
                             reason="www.python.org encodestring failed")
-        test_support.verify(base64.encodestring("a") ==
-                            "YQ==\n",
+        test_support.verify(base64.encodestring("a") ==
+                            "YQ==\n",
                             reason="a encodestring failed")
-        test_support.verify(base64.encodestring("ab") ==
-                            "YWI=\n",
+        test_support.verify(base64.encodestring("ab") ==
+                            "YWI=\n",
                             reason="ab encodestring failed")
-        test_support.verify(base64.encodestring("abc") ==
-                            "YWJj\n",
+        test_support.verify(base64.encodestring("abc") ==
+                            "YWJj\n",
                             reason="abc encodestring failed")
-        test_support.verify(base64.encodestring("") ==
-                            "",
+        test_support.verify(base64.encodestring("") ==
+                            "",
                             reason="null encodestring failed")
         test_support.verify(base64.encodestring(
             "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}") ==
@@ -29,16 +29,16 @@ class Base64TestCase(unittest.TestCase):
     def test_decode_string(self):
         """Testing decode string"""
         test_support.verify(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n") ==
-                            "www.python.org",
+                            "www.python.org",
                             reason="www.python.org decodestring failed")
         test_support.verify(base64.decodestring("YQ==\n") ==
-                            "a",
+                            "a",
                             reason="a decodestring failed")
         test_support.verify(base64.decodestring("YWI=\n") ==
-                            "ab",
+                            "ab",
                             reason="ab decodestring failed")
         test_support.verify(base64.decodestring("YWJj\n") ==
-                            "abc",
+                            "abc",
                             reason="abc decodestring failed")
         test_support.verify(base64.decodestring(
             "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n") ==
@@ -50,10 +50,9 @@ class Base64TestCase(unittest.TestCase):
             pass
         else:
             self.fail("expected a binascii.Error on null decode request")
 
-
 def test_main():
     test_support.run_unittest(Base64TestCase)
 
 if __name__ == "__main__":
     test_main()

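The expected values in the tests above are just the classic encodestring()/decodestring() round trip; a sketch:

    import base64

    encoded = base64.encodestring('www.python.org')
    assert encoded == 'd3d3LnB5dGhvbi5vcmc=\n'      # note the trailing newline
    assert base64.decodestring(encoded) == 'www.python.org'
    assert base64.encodestring('') == ''            # empty in, empty out
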
@@ -113,7 +113,7 @@ else:
 
 # Verify the treatment of Unicode strings
 if have_unicode:
-    verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
+    verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
            "hexlify failed for Unicode")
 
 # A test for SF bug 534347 (segfaults without the proper fix)

@@ -104,7 +104,7 @@ class TestIsSubclassExceptions(unittest.TestCase):
             __bases__ = property(getbases)
 
         class S(C): pass
-
+
         self.assertRaises(TypeError, issubclass, C(), S())
 
         # Like above, but test the second branch, where the __bases__ of the
@@ -176,7 +176,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
     # combinations.
 
     def test_isinstance_normal(self):
-        # normal instances
+        # normal instances
         self.assertEqual(True, isinstance(Super(), Super))
         self.assertEqual(False, isinstance(Super(), Child))
         self.assertEqual(False, isinstance(Super(), AbstractSuper))
@@ -186,7 +186,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(False, isinstance(Child(), AbstractSuper))
 
     def test_isinstance_abstract(self):
-        # abstract instances
+        # abstract instances
         self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
         self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
         self.assertEqual(False, isinstance(AbstractSuper(), Super))
@@ -196,7 +196,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
         self.assertEqual(False, isinstance(AbstractChild(), Super))
         self.assertEqual(False, isinstance(AbstractChild(), Child))
-
+
     def test_subclass_normal(self):
         # normal classes
         self.assertEqual(True, issubclass(Super, Super))
@@ -217,7 +217,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
         self.assertEqual(False, issubclass(AbstractChild, Super))
         self.assertEqual(False, issubclass(AbstractChild, Child))
-
+
 
 

@@ -137,7 +137,7 @@ testit('pow(2,-1)', math.pow(2,-1), 0.5)
 print 'radians'
 testit('radians(180)', math.radians(180), math.pi)
 testit('radians(90)', math.radians(90), math.pi/2)
-testit('radians(-45)', math.radians(-45), -math.pi/4)
+testit('radians(-45)', math.radians(-45), -math.pi/4)
 
 print 'sin'
 testit('sin(0)', math.sin(0), 0)

@@ -38,7 +38,7 @@ def test(name, input, output, *args):
         value = apply(f, (input,) + args)
     if value is input:
         if verbose:
-            print 'no'
+            print 'no'
         print '*',f, `input`, `output`, `value`
         return
     if value != output:

@@ -68,7 +68,7 @@ def test(method, input, output, *args):
         exc = sys.exc_info()[:2]
     if value is input:
         if verbose:
-            print 'no'
+            print 'no'
         print '*',f, `input`, `output`, `value`
         return
     if value != output or type(value) is not type(output):

@@ -124,14 +124,14 @@ def tokenize(readline, tokeneater=printtoken):
     """
     The tokenize() function accepts two parameters: one representing the
     input stream, and one providing an output mechanism for tokenize().
-
+
     The first parameter, readline, must be a callable object which provides
     the same interface as the readline() method of built-in file objects.
-    Each call to the function should return one line of input as a string.
+    Each call to the function should return one line of input as a string.
 
     The second parameter, tokeneater, must also be a callable object. It is
     called once for each token, with five arguments, corresponding to the
-    tuples generated by generate_tokens().
+    tuples generated by generate_tokens().
     """
     try:
         tokenize_loop(readline, tokeneater)
@@ -149,13 +149,13 @@ def generate_tokens(readline):
     must be a callable object which provides the same interface as the
     readline() method of built-in file objects. Each call to the function
     should return one line of input as a string.
-
+
     The generator produces 5-tuples with these members: the token type; the
     token string; a 2-tuple (srow, scol) of ints specifying the row and
     column where the token begins in the source; a 2-tuple (erow, ecol) of
     ints specifying the row and column where the token ends in the source;
     and the line on which the token was found. The line passed is the
-    logical line; continuation lines are included.
+    logical line; continuation lines are included.
     """
     lnum = parenlev = continued = 0
     namechars, numchars = string.ascii_letters + '_', '0123456789'

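A short sketch of the generate_tokens() interface described above: readline comes from any file-like object, and each yielded 5-tuple unpacks exactly as the docstring says.

    import tokenize
    from StringIO import StringIO

    source = StringIO('x = 1 + 2\n')
    for tok_type, tok_string, (srow, scol), (erow, ecol), line in \
            tokenize.generate_tokens(source.readline):
        print tokenize.tok_name[tok_type], repr(tok_string), (srow, scol), (erow, ecol)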