"""Supporting definitions for the Python regression tests."""

if __name__ != 'test.test_support':
    raise ImportError('test_support must be imported from the test package')

import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import UserDict
import re
import time
try:
    import thread
except ImportError:
    thread = None

__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
|
2008-05-26 13:22:27 -03:00
|
|
|
"verbose", "use_resources", "max_memuse", "record_original_stdout",
|
|
|
|
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
|
|
|
|
"is_resource_enabled", "requires", "find_unused_port", "bind_port",
|
|
|
|
"fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
|
2010-02-10 17:40:33 -04:00
|
|
|
"SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
|
2010-03-07 08:18:33 -04:00
|
|
|
"open_urlresource", "check_warnings", "check_py3k_warnings",
|
|
|
|
"CleanImport", "EnvironmentVarGuard", "captured_output",
|
2008-05-26 13:22:27 -03:00
|
|
|
"captured_stdout", "TransientResource", "transient_internet",
|
|
|
|
"run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
|
|
|
|
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
|
2009-03-26 16:58:18 -03:00
|
|
|
"threading_cleanup", "reap_children", "cpython_only",
|
2009-06-12 17:14:08 -03:00
|
|
|
"check_impl_detail", "get_attribute", "py3k_bytes"]
|
2008-05-26 13:22:27 -03:00
|
|
|
|
2010-03-15 15:08:58 -03:00
|
|
|
|
2000-07-24 03:55:00 -03:00
|
|
|
class Error(Exception):
    """Base class for regression test exceptions."""


class TestFailed(Error):
    """Test failed."""

class ResourceDenied(unittest.SkipTest):
    """Test skipped because it requested a disallowed resource.

    This is raised when a test calls requires() for a resource that
    has not been enabled.  It is used to distinguish between expected
    and unexpected skips.
    """

@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
    """Context manager to suppress package and module deprecation
    warnings when importing them.

    If ignore is False, this context manager has no effect."""
    if ignore:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", ".+ (module|package)",
                                    DeprecationWarning)
            yield
    else:
        yield

def import_module(name, deprecated=False):
    """Import and return the module to be tested, raising SkipTest if
    it is not available.

    If deprecated is True, any module or package deprecation messages
    will be suppressed."""
    with _ignore_deprecated_imports(deprecated):
        try:
            return importlib.import_module(name)
        except ImportError, msg:
            raise unittest.SkipTest(str(msg))

def _save_and_remove_module(name, orig_modules):
    """Helper function to save and remove a module from sys.modules

    Return value is True if the module was in sys.modules and
    False otherwise."""
    saved = True
    try:
        orig_modules[name] = sys.modules[name]
    except KeyError:
        saved = False
    else:
        del sys.modules[name]
    return saved

def _save_and_block_module(name, orig_modules):
    """Helper function to save and block a module in sys.modules

    Return value is True if the module was in sys.modules and
    False otherwise."""
    saved = True
    try:
        orig_modules[name] = sys.modules[name]
    except KeyError:
        saved = False
    sys.modules[name] = 0
    return saved

def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
    """Imports and returns a module, deliberately bypassing the sys.modules cache
    and importing a fresh copy of the module. Once the import is complete,
    the sys.modules cache is restored to its original state.

    Modules named in fresh are also imported anew if needed by the import.

    Importing of modules named in blocked is prevented while the fresh import
    takes place.

    If deprecated is True, any module or package deprecation messages
    will be suppressed."""
    # NOTE: test_heapq and test_warnings include extra sanity checks to make
    # sure that this utility function is working as expected
    with _ignore_deprecated_imports(deprecated):
        # Keep track of modules saved for later restoration as well
        # as those which just need a blocking entry removed
        orig_modules = {}
        names_to_remove = []
        _save_and_remove_module(name, orig_modules)
        try:
            for fresh_name in fresh:
                _save_and_remove_module(fresh_name, orig_modules)
            for blocked_name in blocked:
                if not _save_and_block_module(blocked_name, orig_modules):
                    names_to_remove.append(blocked_name)
            fresh_module = importlib.import_module(name)
        finally:
            for orig_name, module in orig_modules.items():
                sys.modules[orig_name] = module
            for name_to_remove in names_to_remove:
                del sys.modules[name_to_remove]
        return fresh_module

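# Illustrative sketch, not part of the original module: how a test might combine
# import_module() and import_fresh_module().  The module names used below
# ('ctypes', 'heapq', '_heapq') are examples only.
def _example_fresh_import():
    # Skip the calling test when ctypes is not available on this platform.
    ctypes = import_module('ctypes')
    # Import heapq with its C accelerator blocked, yielding the pure-Python code.
    py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
    return ctypes, py_heapq
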
def get_attribute(obj, name):
    """Get an attribute, raising SkipTest if AttributeError is raised."""
    try:
        attribute = getattr(obj, name)
    except AttributeError:
        raise unittest.SkipTest("module %s has no attribute %s" % (
            obj.__name__, name))
    else:
        return attribute

verbose = 1              # Flag set to 0 by regrtest.py
use_resources = None     # Flag set to [] by regrtest.py
max_memuse = 0           # Disable bigmem tests (they will still be run with
                         # small sizes, to make sure they work.)
real_max_memuse = 0

# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    global _original_stdout
    _original_stdout = stdout

def get_original_stdout():
    return _original_stdout or sys.stdout

def unload(name):
    try:
        del sys.modules[name]
    except KeyError:
        pass

def unlink(filename):
    try:
        os.unlink(filename)
    except OSError:
        pass

def rmtree(path):
    try:
        shutil.rmtree(path)
    except OSError, e:
        # Unix returns ENOENT, Windows returns ESRCH.
        if e.errno not in (errno.ENOENT, errno.ESRCH):
            raise

def forget(modname):
    '''"Forget" a module was ever imported by removing it from sys.modules and
    deleting any .pyc and .pyo files.'''
    unload(modname)
    for dirname in sys.path:
        unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
        # Deleting the .pyo file cannot be within the 'try' for the .pyc since
        # the chance exists that there is no .pyc (and thus the 'try' statement
        # is exited) but there is a .pyo file.
        unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))

def is_resource_enabled(resource):
    """Test whether a resource is enabled.  Known resources are set by
    regrtest.py."""
    return use_resources is not None and resource in use_resources

def requires(resource, msg=None):
    """Raise ResourceDenied if the specified resource is not available.

    If the caller's module is __main__ then the resource is treated as
    enabled and this function returns immediately; the check only takes
    effect when the test is being driven by regrtest.py."""
    # see if the caller's module is __main__ - if so, treat as if
    # the resource was set
    if sys._getframe(1).f_globals.get("__name__") == "__main__":
        return
    if not is_resource_enabled(resource):
        if msg is None:
            msg = "Use of the `%s' resource not enabled" % resource
        raise ResourceDenied(msg)

HOST = 'localhost'

def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Returns an unused port that should be suitable for binding.  This is
    achieved by creating a temporary socket with the same family and type as
    specified by the 'family' and 'socktype' parameters (default is AF_INET,
    SOCK_STREAM), and binding it to the specified host address (defaults to
    0.0.0.0) with the port set to 0, eliciting an unused ephemeral port from
    the OS.  The temporary socket is then closed and deleted, and the
    ephemeral port is returned.

    Either this method or bind_port() should be used for any tests where a
    server socket needs to be bound to a particular port for the duration of
    the test.  Which one to use depends on whether the calling code is creating
    a python socket, or if an unused port needs to be provided in a constructor
    or passed to an external program (i.e. the -accept argument to openssl's
    s_server mode).  Always prefer bind_port() over find_unused_port() where
    possible.  Hard coded ports should *NEVER* be used.  As soon as a server
    socket is bound to a hard coded port, the ability to run multiple instances
    of the test simultaneously on the same host is compromised, which makes the
    test a ticking time bomb in a buildbot environment.  On Unix buildbots, this
    may simply manifest as a failed test, which can be recovered from without
    intervention in most cases, but on Windows, the entire python process can
    completely and utterly wedge, requiring someone to log in to the buildbot
    and manually kill the affected process.

    (This is easy to reproduce on Windows, unfortunately, and can be traced to
    the SO_REUSEADDR socket option having different semantics on Windows versus
    Unix/Linux.  On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
    listen and then accept connections on identical host/ports.  An EADDRINUSE
    socket.error will be raised at some point (depending on the platform and
    the order bind and listen were called on each socket).

    However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
    will ever be raised when attempting to bind two identical host/ports.  When
    accept() is called on each socket, the second caller's process will steal
    the port from the first caller, leaving them both in an awkwardly wedged
    state where they'll no longer respond to any signals or graceful kills, and
    must be forcibly killed via OpenProcess()/TerminateProcess().

    The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
    instead of SO_REUSEADDR, which effectively affords the same semantics as
    SO_REUSEADDR on Unix.  Given the propensity of Unix developers in the Open
    Source world compared to Windows ones, this is a common mistake.  A quick
    look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
    openssl.exe is called with the 's_server' option, for example.  See
    http://bugs.python.org/issue2550 for more info.  The following site also
    has a very thorough description about the implications of both REUSEADDR
    and EXCLUSIVEADDRUSE on Windows:
    http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)

    XXX: although this approach is a vast improvement on previous attempts to
    elicit unused ports, it rests heavily on the assumption that the ephemeral
    port returned to us by the OS won't immediately be dished back out to some
    other process when we close and delete our temporary socket but before our
    calling code has a chance to bind the returned port.  We can deal with this
    issue if/when we come across it."""
    tempsock = socket.socket(family, socktype)
    port = bind_port(tempsock)
    tempsock.close()
    del tempsock
    return port

def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number.  Relies on
    ephemeral ports in order to ensure we are using an unbound port.  This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment.  This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it.  Tests should *never* set these socket options
    for TCP/IP sockets.  The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket.  This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR "
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                raise TestFailed("tests should never set the SO_REUSEPORT "
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port

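# Illustrative sketch, not part of the original module: typical use of bind_port()
# when a test needs a listening TCP socket on a free port.
def _example_bind_port():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        port = bind_port(sock)      # binds to (HOST, 0) and returns the port
        sock.listen(1)
        return port
    finally:
        sock.close()
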
FUZZ = 1e-6

def fcmp(x, y): # fuzzy comparison function
    if isinstance(x, float) or isinstance(y, float):
        try:
            fuzz = (abs(x) + abs(y)) * FUZZ
            if abs(x-y) <= fuzz:
                return 0
        except:
            pass
    elif type(x) == type(y) and isinstance(x, (tuple, list)):
        for i in range(min(len(x), len(y))):
            outcome = fcmp(x[i], y[i])
            if outcome != 0:
                return outcome
        return (len(x) > len(y)) - (len(x) < len(y))
    return (x > y) - (x < y)

try:
    unicode
    have_unicode = True
except NameError:
    have_unicode = False

is_jython = sys.platform.startswith('java')

# Filename used for testing
if os.name == 'java':
    # Jython disallows @ in module names
    TESTFN = '$test'
elif os.name == 'riscos':
    TESTFN = 'testfile'
else:
    TESTFN = '@test'
    # Unicode name only used if TESTFN_ENCODING exists for the platform.
    if have_unicode:
        # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
        # TESTFN_UNICODE is a filename that can be encoded using the
        # file system encoding, but *not* with the default (ascii) encoding
        if isinstance('', unicode):
            # python -U
            # XXX perhaps unicode() should accept Unicode strings?
            TESTFN_UNICODE = "@test-\xe0\xf2"
        else:
            # 2 latin characters.
            TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
        TESTFN_ENCODING = sys.getfilesystemencoding()
        # TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
        # able to be encoded by *either* the default or filesystem encoding.
        # This test really only makes sense on Windows NT platforms
        # which have special Unicode support in posixmodule.
        if (not hasattr(sys, "getwindowsversion") or
                sys.getwindowsversion()[3] < 2):  # 0=win32s or 1=9x/ME
            TESTFN_UNICODE_UNENCODEABLE = None
        else:
            # Japanese characters (I think - from bug 846133)
            TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
            try:
                # XXX - Note - should be using TESTFN_ENCODING here - but for
                # Windows, "mbcs" currently always operates as if in
                # errors=ignore' mode - hence we get '?' characters rather than
                # the exception.  'Latin1' operates as we expect - ie, fails.
                # See [ 850997 ] mbcs encoding ignores errors
                TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
            except UnicodeEncodeError:
                pass
            else:
                print \
                'WARNING: The filename %r CAN be encoded by the filesystem.  ' \
                'Unicode filename tests may not be effective' \
                % TESTFN_UNICODE_UNENCODEABLE

# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())

# Save the initial cwd
SAVEDCWD = os.getcwd()

@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
    """
    Context manager that creates a temporary directory and sets it as the CWD.

    The new CWD is created in the current directory and it's named *name*.
    If *quiet* is False (default) and it's not possible to create or change
    the CWD, an error is raised.  If it's True, only a warning is raised
    and the original CWD is used.
    """
    if isinstance(name, unicode):
        try:
            name = name.encode(sys.getfilesystemencoding() or 'ascii')
        except UnicodeEncodeError:
            if not quiet:
                raise unittest.SkipTest('unable to encode the cwd name with '
                                        'the filesystem encoding.')
    saved_dir = os.getcwd()
    is_temporary = False
    try:
        os.mkdir(name)
        os.chdir(name)
        is_temporary = True
    except OSError:
        if not quiet:
            raise
        warnings.warn('tests may fail, unable to change the CWD to ' + name,
                      RuntimeWarning, stacklevel=3)
    try:
        yield os.getcwd()
    finally:
        os.chdir(saved_dir)
        if is_temporary:
            rmtree(name)

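# Illustrative sketch, not part of the original module: running part of a test
# inside a scratch working directory that is removed afterwards.  The directory
# and file names are examples only.
def _example_temp_cwd():
    with temp_cwd('scratchdir') as path:
        open('junkfile', 'w').close()   # created inside the temporary CWD
        return path
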
def findfile(file, here=__file__, subdir=None):
    """Try to find a file on sys.path and the working directory.  If it is not
    found the argument passed to the function is returned (this does not
    necessarily signal failure; could still be the legitimate path)."""
    if os.path.isabs(file):
        return file
    if subdir is not None:
        file = os.path.join(subdir, file)
    path = sys.path
    path = [os.path.dirname(here)] + path
    for dn in path:
        fn = os.path.join(dn, file)
        if os.path.exists(fn): return fn
    return file

def sortdict(dict):
    "Like repr(dict), but in sorted order."
    items = dict.items()
    items.sort()
    reprpairs = ["%r: %r" % pair for pair in items]
    withcommas = ", ".join(reprpairs)
    return "{%s}" % withcommas

def make_bad_fd():
    """
    Create an invalid file descriptor by opening and closing a file and return
    its fd.
    """
    file = open(TESTFN, "wb")
    try:
        return file.fileno()
    finally:
        file.close()
        unlink(TESTFN)

def check_syntax_error(testcase, statement):
    testcase.assertRaises(SyntaxError, compile, statement,
                          '<test string>', 'exec')

def open_urlresource(url, check=None):
    import urlparse, urllib2

    filename = urlparse.urlparse(url)[2].split('/')[-1]  # '/': it's a URL!

    fn = os.path.join(os.path.dirname(__file__), "data", filename)

    def check_valid_file(fn):
        f = open(fn)
        if check is None:
            return f
        elif check(f):
            f.seek(0)
            return f
        f.close()

    if os.path.exists(fn):
        f = check_valid_file(fn)
        if f is not None:
            return f
        unlink(fn)

    # Verify the requirement before downloading the file
    requires('urlfetch')

    print >> get_original_stdout(), '\tfetching %s ...' % url
    f = urllib2.urlopen(url, timeout=15)
    try:
        with open(fn, "wb") as out:
            s = f.read()
            while s:
                out.write(s)
                s = f.read()
    finally:
        f.close()

    f = check_valid_file(fn)
    if f is not None:
        return f
    raise TestFailed('invalid resource "%s"' % fn)

class WarningsRecorder(object):
    """Convenience wrapper for the warnings list returned on
       entry to the warnings.catch_warnings() context manager.
    """
    def __init__(self, warnings_list):
        self._warnings = warnings_list
        self._last = 0

    def __getattr__(self, attr):
        if len(self._warnings) > self._last:
            return getattr(self._warnings[-1], attr)
        elif attr in warnings.WarningMessage._WARNING_DETAILS:
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    @property
    def warnings(self):
        return self._warnings[self._last:]

    def reset(self):
        self._last = len(self._warnings)

def _filterwarnings(filters, quiet=False):
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    frame = sys._getframe(2)
    registry = frame.f_globals.get('__warningregistry__')
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings.  Because
        # test_warnings swaps the module, we need to look it up in
        # the sys.modules dictionary.
        sys.modules['warnings'].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = [warning.message for warning in w]
    missing = []
    for msg, cat in filters:
        seen = False
        for exc in reraise[:]:
            message = str(exc)
            # Filter out the matching messages
            if (re.match(msg, message, re.I) and
                issubclass(exc.__class__, cat)):
                seen = True
                reraise.remove(exc)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %r" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" %
                             missing[0])

@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)
    """
    quiet = kwargs.get('quiet')
    if not filters:
        filters = (("", Warning),)
        # Preserve backward compatibility
        if quiet is None:
            quiet = True
    return _filterwarnings(filters, quiet)

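# Illustrative sketch, not part of the original module: asserting that a block of
# code emits a particular warning.  The message text and category are examples.
def _example_check_warnings():
    with check_warnings(("deprecated", DeprecationWarning)) as recorder:
        warnings.warn("deprecated API used", DeprecationWarning)
    return recorder.warnings
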
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
    """Context manager to silence py3k warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default False)

    Without argument, it defaults to:
        check_py3k_warnings(("", DeprecationWarning), quiet=False)
    """
    if sys.py3kwarning:
        if not filters:
            filters = (("", DeprecationWarning),)
    else:
        # It should not raise any py3k warning
        filters = ()
    return _filterwarnings(filters, kwargs.get('quiet'))

class CleanImport(object):
    """Context manager to force import to return a new module reference.

    This is useful for testing module-level behaviours, such as
    the emission of a DeprecationWarning on import.

    Use like this:

        with CleanImport("foo"):
            importlib.import_module("foo") # new reference
    """

    def __init__(self, *module_names):
        self.original_modules = sys.modules.copy()
        for module_name in module_names:
            if module_name in sys.modules:
                module = sys.modules[module_name]
                # It is possible that module_name is just an alias for
                # another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to
                # clear the import cache.
                if module.__name__ != module_name:
                    del sys.modules[module.__name__]
                del sys.modules[module_name]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)

class EnvironmentVarGuard(UserDict.DictMixin):

    """Class to help protect the environment variable properly.  Can be used as
    a context manager."""

    def __init__(self):
        self._environ = os.environ
        self._changed = {}

    def __getitem__(self, envvar):
        return self._environ[envvar]

    def __setitem__(self, envvar, value):
        # Remember the initial value on the first access
        if envvar not in self._changed:
            self._changed[envvar] = self._environ.get(envvar)
        self._environ[envvar] = value

    def __delitem__(self, envvar):
        # Remember the initial value on the first access
        if envvar not in self._changed:
            self._changed[envvar] = self._environ.get(envvar)
        if envvar in self._environ:
            del self._environ[envvar]

    def keys(self):
        return self._environ.keys()

    def set(self, envvar, value):
        self[envvar] = value

    def unset(self, envvar):
        del self[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        for (k, v) in self._changed.items():
            if v is None:
                if k in self._environ:
                    del self._environ[k]
            else:
                self._environ[k] = v
        os.environ = self._environ

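# Illustrative sketch, not part of the original module: temporarily overriding and
# removing environment variables; the previous values are restored on exit.  The
# variable names are examples only.
def _example_environment_guard():
    with EnvironmentVarGuard() as env:
        env.set('LANG', 'C')         # overridden for the duration of the block
        env.unset('PYTHONPATH')      # removed for the duration of the block
        return os.environ.get('LANG')
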
class DirsOnSysPath(object):
    """Context manager to temporarily add directories to sys.path.

    This makes a copy of sys.path, appends any directories given
    as positional arguments, then reverts sys.path to the copied
    settings when the context ends.

    Note that *all* sys.path modifications in the body of the
    context manager, including replacement of the object,
    will be reverted at the end of the block.
    """

    def __init__(self, *paths):
        self.original_value = sys.path[:]
        self.original_object = sys.path
        sys.path.extend(paths)

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.path = self.original_object
        sys.path[:] = self.original_value

class TransientResource(object):

    """Raise ResourceDenied if an exception is raised while the context manager
    is in effect that matches the specified exception and attributes."""

    def __init__(self, exc, **kwargs):
        self.exc = exc
        self.attrs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, type_=None, value=None, traceback=None):
        """If type_ is a subclass of self.exc and value has attributes matching
        self.attrs, raise ResourceDenied.  Otherwise let the exception
        propagate (if any)."""
        if type_ is not None and issubclass(self.exc, type_):
            for attr, attr_value in self.attrs.iteritems():
                if not hasattr(value, attr):
                    break
                if getattr(value, attr) != attr_value:
                    break
            else:
                raise ResourceDenied("an optional resource is not available")

_transients = {
    IOError: (errno.ECONNRESET, errno.ETIMEDOUT),
    socket.error: (errno.ECONNRESET,),
    socket.gaierror: [getattr(socket, t)
                      for t in ('EAI_NODATA', 'EAI_NONAME')
                      if hasattr(socket, t)],
    }

@contextlib.contextmanager
def transient_internet():
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions.

    Errors caught:
        timeout          IOError                   errno = ETIMEDOUT
        socket reset     socket.error, IOError     errno = ECONNRESET
        dns no data      socket.gaierror           errno = EAI_NODATA
        dns no name      socket.gaierror           errno = EAI_NONAME
    """
    try:
        yield
    except tuple(_transients) as err:
        for errtype in _transients:
            if isinstance(err, errtype) and err.errno in _transients[errtype]:
                raise ResourceDenied("could not establish network "
                                     "connection ({})".format(err))
        raise

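# Illustrative sketch, not part of the original module: wrapping network access so
# that transient connectivity problems surface as ResourceDenied (a skip) rather
# than a test failure.  The host name is an example only.
def _example_transient_internet():
    with transient_internet():
        return socket.gethostbyname('www.example.com')
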
@contextlib.contextmanager
def captured_output(stream_name):
    """Run the 'with' statement body using a StringIO object in place of a
    specific attribute on the sys module.
    Example use (with 'stream_name=stdout')::

       with captured_stdout() as s:
           print "hello"
       assert s.getvalue() == "hello"
    """
    import StringIO
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)

def captured_stdout():
    return captured_output("stdout")

def captured_stdin():
    return captured_output("stdin")

def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector.  (Even in CPython
    this can be the case in case of reference cycles.)  This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected.  This function tries its best to force all garbage
    objects to disappear.
    """
    gc.collect()
    if is_jython:
        time.sleep(0.1)
    gc.collect()
    gc.collect()

#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.

def run_with_locale(catstr, *locales):
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # cannot retrieve original locale, so do nothing
                locale = orig_locale = None
            else:
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        inner.func_name = func.func_name
        inner.__doc__ = func.__doc__
        return inner
    return decorator

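# Illustrative sketch, not part of the original module: running a test function
# under an alternative locale, silently falling back to the current locale when
# none of the candidates is available.  The locale names are examples only.
@run_with_locale('LC_NUMERIC', 'en_US.UTF8', 'en_US.US-ASCII', 'en_US')
def _example_locale_formatting():
    return format(1234.5, 'n')
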
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.

# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G

MAX_Py_ssize_t = sys.maxsize

def set_memlimit(limit):
    global max_memuse
    global real_max_memuse
    sizes = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024*_1G,
    }
    m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                 re.IGNORECASE | re.VERBOSE)
    if m is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
    real_max_memuse = memlimit
    if memlimit > MAX_Py_ssize_t:
        memlimit = MAX_Py_ssize_t
    if memlimit < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = memlimit

def bigmemtest(minsize, memuse, overhead=5*_1M):
    """Decorator for bigmem tests.

    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independent of the testsize, and defaults to 5Mb.

    The decorator tries to guess a good value for 'size' and passes it to
    the decorated test function. If minsize * memuse is more than the
    allowed memory use (as defined by max_memuse), the test is skipped.
    Otherwise, minsize is adjusted upward to use up to max_memuse.
    """
    def decorator(f):
        def wrapper(self):
            if not max_memuse:
                # If max_memuse is 0 (the default),
                # we still want to run the tests with size set to a few kb,
                # to make sure they work. We still want to avoid using
                # too much memory, though, but we do that noisily.
                maxsize = 5147
                self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
            else:
                maxsize = int((max_memuse - overhead) / memuse)
                if maxsize < minsize:
                    # Really ought to print 'test skipped' or something
                    if verbose:
                        sys.stderr.write("Skipping %s because of memory "
                                         "constraint\n" % (f.__name__,))
                    return
                # Try to keep some breathing room in memory use
                maxsize = max(maxsize - 50 * _1M, minsize)
            return f(self, maxsize)
        wrapper.minsize = minsize
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator

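# Illustrative sketch, not part of the original module: how a bigmem test declares
# its memory requirements.  The decorator passes a suitable 'size' argument, or
# returns early when max_memuse is too small for the test to be meaningful.
class _ExampleBigMemTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=2)
    def test_repeat(self, size):
        s = '-' * size
        self.assertEqual(len(s), size)
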
def precisionbigmemtest(size, memuse, overhead=5*_1M):
    def decorator(f):
        def wrapper(self):
            if not real_max_memuse:
                maxsize = 5147
            else:
                maxsize = size

            if real_max_memuse and real_max_memuse < maxsize * memuse:
                if verbose:
                    sys.stderr.write("Skipping %s because of memory "
                                     "constraint\n" % (f.__name__,))
                return

            return f(self, maxsize)
        wrapper.size = size
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator

def bigaddrspacetest(f):
    """Decorator for tests that fill the address space."""
    def wrapper(self):
        if max_memuse < MAX_Py_ssize_t:
            if verbose:
                sys.stderr.write("Skipping %s because of memory "
                                 "constraint\n" % (f.__name__,))
        else:
            return f(self)
    return wrapper

#=======================================================================
# unittest integration.

class BasicTestRunner:
    def run(self, test):
        result = unittest.TestResult()
        test(result)
        return result

def _id(obj):
    return obj

def requires_resource(resource):
    if is_resource_enabled(resource):
        return _id
    else:
        return unittest.skip("resource {0!r} is not enabled".format(resource))

def cpython_only(test):
    """
    Decorator for tests only applicable on CPython.
    """
    return impl_detail(cpython=True)(test)

def impl_detail(msg=None, **guards):
    if check_impl_detail(**guards):
        return _id
    if msg is None:
        guardnames, default = _parse_guards(guards)
        if default:
            msg = "implementation detail not available on {0}"
        else:
            msg = "implementation detail specific to {0}"
        guardnames = sorted(guardnames.keys())
        msg = msg.format(' or '.join(guardnames))
    return unittest.skip(msg)

def _parse_guards(guards):
    # Returns a tuple ({platform_name: run_me}, default_value)
    if not guards:
        return ({'cpython': True}, False)
    is_true = guards.values()[0]
    assert guards.values() == [is_true] * len(guards)  # all True or all False
    return (guards, not is_true)

# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
    """This function returns True or False depending on the host platform.
       Examples:
          if check_impl_detail():               # only on CPython (default)
          if check_impl_detail(jython=True):    # only on Jython
          if check_impl_detail(cpython=False):  # everywhere except on CPython
    """
    guards, default = _parse_guards(guards)
    return guards.get(platform.python_implementation().lower(), default)

def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class."""
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if not result.wasSuccessful():
        if len(result.errors) == 1 and not result.failures:
            err = result.errors[0][1]
        elif len(result.failures) == 1 and not result.errors:
            err = result.failures[0][1]
        else:
            err = "multiple errors occurred"
            if not verbose:
                err += "; run in verbose mode for details"
        raise TestFailed(err)

def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes."""
    valid_types = (unittest.TestSuite, unittest.TestCase)
    suite = unittest.TestSuite()
    for cls in classes:
        if isinstance(cls, str):
            if cls in sys.modules:
                suite.addTest(unittest.findTestCases(sys.modules[cls]))
            else:
                raise ValueError("str arguments must be keys in sys.modules")
        elif isinstance(cls, valid_types):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    _run_suite(suite)

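# Illustrative sketch, not part of the original module: the test_main() pattern
# most test modules use to drive their TestCase classes through run_unittest().
# The class and test names are examples only.
def _example_test_main():
    class ExampleTests(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)
    run_unittest(ExampleTests)
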
#=======================================================================
# doctest driver.

def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    test_support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).
    """

    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        verbosity = None

    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
    finally:
        sys.stdout = save_stdout
    if verbose:
        print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
    return f, t

#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R

# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.

def threading_setup():
    if thread:
        return thread._count(),
    else:
        return 1,

def threading_cleanup(nb_threads):
    if not thread:
        return

    _MAX_COUNT = 10
    for count in range(_MAX_COUNT):
        n = thread._count()
        if n == nb_threads:
            break
        time.sleep(0.1)
    # XXX print a warning in case of failure?

def reap_threads(func):
    """Use this function when threads are being used.  This will
    ensure that the threads are cleaned up even when the test fails.
    If threading is unavailable this function does nothing.
    """
    if not thread:
        return func

    @functools.wraps(func)
    def decorator(*args):
        key = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*key)
    return decorator

def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.
    """

    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if hasattr(os, 'waitpid'):
        any_process = -1
        while True:
            try:
                # This will raise an exception on Windows.  That's ok.
                pid, status = os.waitpid(any_process, os.WNOHANG)
                if pid == 0:
                    break
            except:
                break

def py3k_bytes(b):
    """Emulate the py3k bytes() constructor.

    NOTE: This is only a best effort function.
    """
    try:
        # memoryview?
        return b.tobytes()
    except AttributeError:
        try:
            # iterable of ints?
            return b"".join(chr(x) for x in b)
        except TypeError:
            return bytes(b)