Merged revisions 61038,61042-61045,61047,61050,61053,61055-61056,61061-61062,61066,61068,61070,61083,61085,61092-61103 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk

........
  r61098 | jeffrey.yasskin | 2008-02-28 05:45:36 +0100 (Thu, 28 Feb 2008) | 7 lines

  Move abc._Abstract into object by adding a new flag Py_TPFLAGS_IS_ABSTRACT,
  which forbids constructing types that have it set. The effect is to speed

    ./python.exe -m timeit -s 'import abc' -s 'class Foo(object): __metaclass__ = abc.ABCMeta' 'Foo()'

  up from 2.5us to 0.201us. This fixes issue 1762.
........
  r61099 | jeffrey.yasskin | 2008-02-28 06:53:18 +0100 (Thu, 28 Feb 2008) | 3 lines

  Speed test_socketserver up from 28.739s to 0.226s, simplify the logic, and
  make sure all tests run even if some fail.
........
  r61100 | jeffrey.yasskin | 2008-02-28 07:09:19 +0100 (Thu, 28 Feb 2008) | 21 lines

  Thread.start() used sleep(0.000001) to make sure it didn't return before the
  new thread had started. At least on my MacBook Pro, that wound up sleeping
  for a full 10ms (probably 1 jiffy). By using an Event instead, we can be
  absolutely certain that the thread has started, and return more quickly
  (217us).

  Before:
  $ ./python.exe -m timeit -s 'from threading import Thread' 't = Thread(); t.start(); t.join()'
  100 loops, best of 3: 10.3 msec per loop
  $ ./python.exe -m timeit -s 'from threading import Thread; t = Thread()' 't.isAlive()'
  1000000 loops, best of 3: 0.47 usec per loop

  After:
  $ ./python.exe -m timeit -s 'from threading import Thread' 't = Thread(); t.start(); t.join()'
  1000 loops, best of 3: 217 usec per loop
  $ ./python.exe -m timeit -s 'from threading import Thread; t = Thread()' 't.isAlive()'
  1000000 loops, best of 3: 0.86 usec per loop

  To be fair, the 10ms isn't CPU time, and other threads including the spawned
  one get to run during it. There are also some slightly more complicated ways
  to get back the .4us in isAlive() if we want.
........
  r61101 | raymond.hettinger | 2008-02-28 10:23:48 +0100 (Thu, 28 Feb 2008) | 1 line

  Add repeat keyword argument to itertools.product().
........
  r61102 | christian.heimes | 2008-02-28 12:18:49 +0100 (Thu, 28 Feb 2008) | 1 line

  The empty tuple is usually a singleton with a much higher refcnt than 1
........
This commit is contained in:
  parent 380f7f22fa
  commit 9e7f1d2e96
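The r61098 timing quoted above was taken on trunk with the 2.x `__metaclass__` spelling. A rough, hedged way to repeat the measurement from Python on this branch (py3k class syntax, timeit module instead of the command line) might look like the sketch below; the numbers are machine-dependent and are not part of the merge.

    # Sketch only; assumes the py3k metaclass keyword.
    import timeit

    setup = "import abc\nclass Foo(metaclass=abc.ABCMeta): pass"
    print(timeit.timeit("Foo()", setup=setup, number=100000))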
@@ -327,7 +327,7 @@ loops that truncate the stream.
    example :func:`islice` or :func:`takewhile`).


-.. function:: product(*iterables)
+.. function:: product(*iterables[, repeat])

    Cartesian product of input iterables.

@@ -340,11 +340,15 @@ loops that truncate the stream.
    so that if the inputs iterables are sorted, the product tuples are emitted
    in sorted order.

+   To compute the product of an iterable with itself, specify the number of
+   repetitions with the optional *repeat* keyword argument. For example,
+   ``product(A, repeat=4)`` means the same as ``product(A, A, A, A)``.
+
    Equivalent to the following except that the actual implementation does not
    build-up intermediate results in memory::

-      def product(*args):
-          pools = map(tuple, args)
+      def product(*args, **kwds):
+          pools = map(tuple, args) * kwds.get('repeat', 1)
           if pools:
               result = [[]]
               for pool in pools:
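As a quick illustration of the documented equivalence (not part of the patch itself):

    from itertools import product

    A = [0, 1]
    # product(A, repeat=2) is documented above as meaning product(A, A)
    assert list(product(A, repeat=2)) == list(product(A, A))
    print(list(product(A, repeat=2)))   # [(0, 0), (0, 1), (1, 0), (1, 1)]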
@@ -532,6 +532,9 @@ given type object has a specified feature.
 #define Py_TPFLAGS_HAVE_VERSION_TAG (1L<<18)
 #define Py_TPFLAGS_VALID_VERSION_TAG (1L<<19)

+/* Type is abstract and cannot be instantiated */
+#define Py_TPFLAGS_IS_ABSTRACT (1L<<20)
+
 /* These flags are used to determine if a type is a subclass. */
 #define Py_TPFLAGS_INT_SUBCLASS (1L<<23)
 #define Py_TPFLAGS_LONG_SUBCLASS (1L<<24)
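For orientation, the new bit should be visible from Python through `type.__flags__`. A hedged check, assuming the 1L<<20 value defined in the hunk above:

    import abc

    PY_TPFLAGS_IS_ABSTRACT = 1 << 20   # mirrors the #define added in this hunk

    class Base(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def f(self): ...

    print(bool(Base.__flags__ & PY_TPFLAGS_IS_ABSTRACT))   # expected: True
    print(bool(int.__flags__ & PY_TPFLAGS_IS_ABSTRACT))    # expected: False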
Lib/abc.py (47 lines changed)

@@ -52,50 +52,6 @@ class abstractproperty(property):
     __isabstractmethod__ = True


-class _Abstract(object):
-
-    """Helper class inserted into the bases by ABCMeta (using _fix_bases()).
-
-    You should never need to explicitly subclass this class.
-    """
-
-    def __new__(cls, *args, **kwds):
-        am = cls.__dict__.get("__abstractmethods__")
-        if am:
-            raise TypeError("Can't instantiate abstract class %s "
-                            "with abstract methods %s" %
-                            (cls.__name__, ", ".join(sorted(am))))
-        if (args or kwds) and cls.__init__ is object.__init__:
-            raise TypeError("Can't pass arguments to __new__ "
-                            "without overriding __init__")
-        return super().__new__(cls)
-
-    @classmethod
-    def __subclasshook__(cls, subclass):
-        """Abstract classes can override this to customize issubclass().
-
-        This is invoked early on by __subclasscheck__() below.  It
-        should return True, False or NotImplemented.  If it returns
-        NotImplemented, the normal algorithm is used.  Otherwise, it
-        overrides the normal algorithm (and the outcome is cached).
-        """
-        return NotImplemented
-
-
-def _fix_bases(bases):
-    """Helper method that inserts _Abstract in the bases if needed."""
-    for base in bases:
-        if issubclass(base, _Abstract):
-            # _Abstract is already a base (maybe indirectly)
-            return bases
-    if object in bases:
-        # Replace object with _Abstract
-        return tuple([_Abstract if base is object else base
-                      for base in bases])
-    # Append _Abstract to the end
-    return bases + (_Abstract,)
-
-
 class ABCMeta(type):

     """Metaclass for defining Abstract Base Classes (ABCs).

@@ -118,7 +74,6 @@ class ABCMeta(type):
     _abc_invalidation_counter = 0

     def __new__(mcls, name, bases, namespace):
-        bases = _fix_bases(bases)
         cls = super().__new__(mcls, name, bases, namespace)
         # Compute set of abstract method names
         abstracts = {name
@@ -129,7 +84,7 @@ class ABCMeta(type):
             value = getattr(cls, name, None)
             if getattr(value, "__isabstractmethod__", False):
                 abstracts.add(name)
-        cls.__abstractmethods__ = abstracts
+        cls.__abstractmethods__ = frozenset(abstracts)
         # Set up inheritance registry
         cls._abc_registry = WeakSet()
         cls._abc_cache = WeakSet()
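A small usage sketch (not from the patch) of what the slimmed-down ABCMeta now guarantees: `__abstractmethods__` is a frozenset, and instantiating an incomplete class fails; the check formerly done in `_Abstract.__new__` now lives in `object.__new__` in C.

    from abc import ABCMeta, abstractmethod

    class Shape(metaclass=ABCMeta):
        @abstractmethod
        def area(self):
            ...

    print(Shape.__abstractmethods__)    # frozenset({'area'})
    try:
        Shape()
    except TypeError as exc:
        print(exc)   # Can't instantiate abstract class Shape with abstract methods area

    class Square(Shape):
        def __init__(self, side):
            self.side = side
        def area(self):
            return self.side * self.side

    print(Square(3).area())             # 9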
@@ -1649,6 +1649,9 @@ class Decimal(_numbers.Real, _numbers.Inexact):
         else:
             return -1

+    def __round__(self):
+        return self._round_down(0)
+
     def _round_up(self, prec):
         """Rounds away from 0."""
         return -self._round_down(prec)
@@ -1684,6 +1687,9 @@ class Decimal(_numbers.Real, _numbers.Inexact):
         else:
             return -self._round_down(prec)

+    def __ceil__(self):
+        return self._round_ceiling(0)
+
     def _round_floor(self, prec):
         """Rounds down (not towards 0 if negative)"""
         if not self._sign:
@@ -1691,6 +1697,9 @@ class Decimal(_numbers.Real, _numbers.Inexact):
         else:
             return -self._round_down(prec)

+    def __floor__(self):
+        return self._round_floor(0)
+
     def _round_05up(self, prec):
         """Round down unless digit prec-1 is 0 or 5."""
         if prec and self._int[prec-1] not in '05':
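These hooks presumably exist so that round(), math.floor() and math.ceil() can dispatch to Decimal via the standard __round__/__floor__/__ceil__ protocol. A hedged sketch; the exact return values depend on the _round_* helpers the new methods delegate to.

    import math
    from decimal import Decimal

    d = Decimal("2.7")
    # round() -> __round__, math.floor() -> __floor__, math.ceil() -> __ceil__
    print(round(d), math.floor(d), math.ceil(d))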
@@ -196,6 +196,7 @@ You can get the information from the list type:
     '__setattr__',
     '__setitem__',
     '__str__',
+    '__subclasshook__',
     'append',
     'count',
     'extend',
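The extra name appears because object itself now exposes __subclasshook__ (see the typeobject.c hunks further down); a one-line sanity check on this branch would presumably be:

    print('__subclasshook__' in dir(list))   # expected: True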
@@ -2,13 +2,15 @@
 Test suite for SocketServer.py.
 """

-import os
-import socket
 import errno
+import imp
+import os
 import select
-import time
+import signal
+import socket
+import tempfile
 import threading
+import time
 import unittest
 import SocketServer

@@ -19,7 +21,6 @@ from test.test_support import TESTFN as TEST_FILE
 test.test_support.requires("network")

 NREQ = 3
-DELAY = 0.5
 TEST_STR = b"hello world\n"
 HOST = "localhost"

@@ -27,14 +28,6 @@ HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
 HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"


-class MyMixinHandler:
-    def handle(self):
-        time.sleep(DELAY)
-        line = self.rfile.readline()
-        time.sleep(DELAY)
-        self.wfile.write(line)
-
-
 def receive(sock, n, timeout=20):
     r, w, x = select.select([sock], [], [], timeout)
     if sock in r:
@@ -42,14 +35,6 @@ def receive(sock, n, timeout=20):
     else:
         raise RuntimeError("timed out on %r" % (sock,))

-class MyStreamHandler(MyMixinHandler, SocketServer.StreamRequestHandler):
-    pass
-
-class MyDatagramHandler(MyMixinHandler,
-                        SocketServer.DatagramRequestHandler):
-    pass
-
 if HAVE_UNIX_SOCKETS:
     class ForkingUnixStreamServer(SocketServer.ForkingMixIn,
                                   SocketServer.UnixStreamServer):
@@ -111,47 +96,28 @@ class ServerThread(threading.Thread):
             pass
         if verbose: print("thread: creating server")
         svr = svrcls(self.__addr, self.__hdlrcls)
-        # pull the address out of the server in case it changed
-        # this can happen if another process is using the port
-        addr = svr.server_address
-        if addr:
-            self.__addr = addr
-            if self.__addr != svr.socket.getsockname():
-                raise RuntimeError('server_address was %s, expected %s' %
-                                   (self.__addr, svr.socket.getsockname()))
+        # We had the OS pick a port, so pull the real address out of
+        # the server.
+        self.addr = svr.server_address
+        self.port = self.addr[1]
+        if self.addr != svr.socket.getsockname():
+            raise RuntimeError('server_address was %s, expected %s' %
+                               (self.addr, svr.socket.getsockname()))
+        self.ready.set()
         if verbose: print("thread: serving three times")
         svr.serve_a_few()
         if verbose: print("thread: done")


-class ForgivingTCPServer(SocketServer.TCPServer):
-    # prevent errors if another process is using the port we want
-    def server_bind(self):
-        host, default_port = self.server_address
-        # this code shamelessly stolen from test.test_support
-        # the ports were changed to protect the innocent
-        import sys
-        for port in [default_port, 3434, 8798, 23833]:
-            try:
-                self.server_address = host, port
-                SocketServer.TCPServer.server_bind(self)
-                break
-            except socket.error as e:
-                (err, msg) = e
-                if err != errno.EADDRINUSE:
-                    raise
-                print(' WARNING: failed to listen on port %d, trying another' % port, file=sys.__stderr__)
-
 class SocketServerTest(unittest.TestCase):
     """Test all socket servers."""

     def setUp(self):
-        self.port_seed = 0
+        signal.alarm(20)  # Kill deadlocks after 20 seconds.
         self.test_files = []

     def tearDown(self):
-        time.sleep(DELAY)
         reap_children()

         for fn in self.test_files:
@@ -160,16 +126,18 @@ class SocketServerTest(unittest.TestCase):
             except os.error:
                 pass
         self.test_files[:] = []
-
-    def pickport(self):
-        self.port_seed += 1
-        return 10000 + (os.getpid() % 1000)*10 + self.port_seed
+        signal.alarm(0)  # Didn't deadlock.

     def pickaddr(self, proto):
         if proto == socket.AF_INET:
-            return (HOST, self.pickport())
+            return (HOST, 0)
         else:
-            fn = TEST_FILE + str(self.pickport())
+            # XXX: We need a way to tell AF_UNIX to pick its own name
+            # like AF_INET provides port==0.
+            dir = None
+            if os.name == 'os2':
+                dir = '\socket'
+            fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
             if os.name == 'os2':
                 # AF_UNIX socket names on OS/2 require a specific prefix
                 # which can't include a drive letter and must also use
@@ -178,7 +146,6 @@ class SocketServerTest(unittest.TestCase):
                 fn = fn[2:]
                 if fn[0] in (os.sep, os.altsep):
                     fn = fn[1:]
-                fn = os.path.join('\socket', fn)
                 if os.sep == '/':
                     fn = fn.replace(os.sep, os.altsep)
                 else:
@@ -186,22 +153,28 @@ class SocketServerTest(unittest.TestCase):
             self.test_files.append(fn)
         return fn

-    def run_servers(self, proto, servers, hdlrcls, testfunc):
-        for svrcls in servers:
-            addr = self.pickaddr(proto)
+    def run_server(self, svrcls, hdlrbase, testfunc):
+        class MyHandler(hdlrbase):
+            def handle(self):
+                line = self.rfile.readline()
+                self.wfile.write(line)
+
+        addr = self.pickaddr(svrcls.address_family)
         if verbose:
             print("ADDR =", addr)
             print("CLASS =", svrcls)
-        t = ServerThread(addr, svrcls, hdlrcls)
+        t = ServerThread(addr, svrcls, MyHandler)
         if verbose: print("server created")
         t.start()
         if verbose: print("server running")
-        for i in range(NREQ):
-            t.ready.wait(10*DELAY)
-            self.assert_(t.ready.isSet(),
-                         "Server not ready within a reasonable time")
+        t.ready.wait(10)
+        self.assert_(t.ready.isSet(),
+                     "%s not ready within a reasonable time" % svrcls)
+        addr = t.addr
+        for i in range(NREQ):
             if verbose: print("test client", i)
-            testfunc(proto, addr)
+            testfunc(svrcls.address_family, addr)
         if verbose: print("waiting for server")
         t.join()
         if verbose: print("done")
@@ -227,46 +200,73 @@ class SocketServerTest(unittest.TestCase):
         self.assertEquals(buf, TEST_STR)
         s.close()

-    def test_TCPServers(self):
-        # Test SocketServer.TCPServer
-        servers = [ForgivingTCPServer, SocketServer.ThreadingTCPServer]
-        if HAVE_FORKING:
-            servers.append(SocketServer.ForkingTCPServer)
-        self.run_servers(socket.AF_INET, servers,
-                         MyStreamHandler, self.stream_examine)
+    def test_TCPServer(self):
+        self.run_server(SocketServer.TCPServer,
+                        SocketServer.StreamRequestHandler,
+                        self.stream_examine)
+
+    def test_ThreadingTCPServer(self):
+        self.run_server(SocketServer.ThreadingTCPServer,
+                        SocketServer.StreamRequestHandler,
+                        self.stream_examine)

-    def test_UDPServers(self):
-        # Test SocketServer.UDPServer
-        servers = [SocketServer.UDPServer,
-                   SocketServer.ThreadingUDPServer]
-        if HAVE_FORKING:
-            servers.append(SocketServer.ForkingUDPServer)
-        self.run_servers(socket.AF_INET, servers, MyDatagramHandler,
+    def test_ThreadingTCPServer(self):
+        self.run_server(SocketServer.ForkingTCPServer,
+                        SocketServer.StreamRequestHandler,
+                        self.stream_examine)
+
+    if HAVE_UNIX_SOCKETS:
+        def test_UnixStreamServer(self):
+            self.run_server(SocketServer.UnixStreamServer,
+                            SocketServer.StreamRequestHandler,
+                            self.stream_examine)
+
+        def test_ThreadingUnixStreamServer(self):
+            self.run_server(SocketServer.ThreadingUnixStreamServer,
+                            SocketServer.StreamRequestHandler,
+                            self.stream_examine)
+
+        if HAVE_FORKING:
+            def test_ForkingUnixStreamServer(self):
+                self.run_server(ForkingUnixStreamServer,
+                                SocketServer.StreamRequestHandler,
+                                self.stream_examine)
+
+    def test_UDPServer(self):
+        self.run_server(SocketServer.UDPServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)
+
+    def test_ThreadingUDPServer(self):
+        self.run_server(SocketServer.ThreadingUDPServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)

-    def test_stream_servers(self):
-        # Test SocketServer's stream servers
-        if not HAVE_UNIX_SOCKETS:
-            return
-        servers = [SocketServer.UnixStreamServer,
-                   SocketServer.ThreadingUnixStreamServer]
-        if HAVE_FORKING:
-            servers.append(ForkingUnixStreamServer)
-        self.run_servers(socket.AF_UNIX, servers, MyStreamHandler,
-                         self.stream_examine)
+    def test_ForkingUDPServer(self):
+        self.run_server(SocketServer.ForkingUDPServer,
+                        SocketServer.DatagramRequestHandler,
+                        self.dgram_examine)

     # Alas, on Linux (at least) recvfrom() doesn't return a meaningful
     # client address so this cannot work:

-    # def test_dgram_servers(self):
-    #     # Test SocketServer.UnixDatagramServer
-    #     if not HAVE_UNIX_SOCKETS:
-    #         return
-    #     servers = [SocketServer.UnixDatagramServer,
-    #                SocketServer.ThreadingUnixDatagramServer]
-    #     if HAVE_FORKING:
-    #         servers.append(ForkingUnixDatagramServer)
-    #     self.run_servers(socket.AF_UNIX, servers, MyDatagramHandler,
+    # if HAVE_UNIX_SOCKETS:
+    #     def test_UnixDatagramServer(self):
+    #         self.run_server(SocketServer.UnixDatagramServer,
+    #                         SocketServer.DatagramRequestHandler,
+    #                         self.dgram_examine)
+    #
+    #     def test_ThreadingUnixDatagramServer(self):
+    #         self.run_server(SocketServer.ThreadingUnixDatagramServer,
+    #                         SocketServer.DatagramRequestHandler,
+    #                         self.dgram_examine)
+    #
+    #     if HAVE_FORKING:
+    #         def test_ForkingUnixDatagramServer(self):
+    #             self.run_server(SocketServer.ForkingUnixDatagramServer,
+    #                             SocketServer.DatagramRequestHandler,
+    #                             self.dgram_examine)
@@ -279,3 +279,4 @@ def test_main():

 if __name__ == "__main__":
     test_main()
+    signal.alarm(3)  # Shutdown shouldn't take more than 3 seconds.
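Two ideas carry the rewritten tests: bind to port 0 and read the real address back from the server, and block on a threading.Event instead of sleeping. A stripped-down, hedged sketch of that pattern (hypothetical names, not the test module itself; SocketServer is the module name used on this branch):

    import socket
    import threading
    import SocketServer   # spelled socketserver in later releases

    class EchoHandler(SocketServer.StreamRequestHandler):
        def handle(self):
            self.wfile.write(self.rfile.readline())

    ready = threading.Event()
    addr_box = []

    def serve():
        svr = SocketServer.TCPServer(("localhost", 0), EchoHandler)  # port 0: let the OS pick
        addr_box.append(svr.server_address)   # pull the real address back out
        ready.set()                           # signal readiness instead of sleeping
        svr.handle_request()
        svr.server_close()

    t = threading.Thread(target=serve)
    t.start()
    ready.wait(10)

    s = socket.create_connection(addr_box[0])
    s.sendall(b"hello world\n")
    print(s.recv(1024))   # b'hello world\n'
    s.close()
    t.join()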
@@ -403,7 +403,7 @@ class Thread(_Verbose):
         self._args = args
         self._kwargs = kwargs
         self._daemonic = self._set_daemon()
-        self._started = False
+        self._started = Event()
         self._stopped = False
         self._block = Condition(Lock())
         self._initialized = True
@@ -418,7 +418,7 @@ class Thread(_Verbose):
     def __repr__(self):
         assert self._initialized, "Thread.__init__() was not called"
         status = "initial"
-        if self._started:
+        if self._started.isSet():
             status = "started"
         if self._stopped:
             status = "stopped"
@@ -429,7 +429,8 @@ class Thread(_Verbose):
     def start(self):
         if not self._initialized:
             raise RuntimeError("thread.__init__() not called")
-        if self._started:
+
+        if self._started.isSet():
             raise RuntimeError("thread already started")
         if __debug__:
             self._note("%s.start(): starting thread", self)
@@ -437,8 +438,7 @@ class Thread(_Verbose):
         _limbo[self] = self
         _active_limbo_lock.release()
         _start_new_thread(self._bootstrap, ())
-        self._started = True
-        _sleep(0.000001)    # 1 usec, to let the thread run (Solaris hack)
+        self._started.wait()

     def run(self):
         try:
@@ -455,11 +455,11 @@ class Thread(_Verbose):
         # happen when a daemon thread wakes up at an unfortunate
         # moment, finds the world around it destroyed, and raises some
         # random exception *** while trying to report the exception in
-        # __bootstrap_inner() below ***. Those random exceptions
+        # _bootstrap_inner() below ***. Those random exceptions
         # don't help anybody, and they confuse users, so we suppress
         # them. We suppress them only when it appears that the world
         # indeed has already been destroyed, so that exceptions in
-        # __bootstrap_inner() during normal business hours are properly
+        # _bootstrap_inner() during normal business hours are properly
         # reported. Also, we only suppress them for daemonic threads;
         # if a non-daemonic encounters this, something else is wrong.
         try:
@@ -471,29 +471,29 @@ class Thread(_Verbose):

     def _bootstrap_inner(self):
         try:
-            self._started = True
+            self._started.set()
             _active_limbo_lock.acquire()
             _active[_get_ident()] = self
             del _limbo[self]
             _active_limbo_lock.release()
             if __debug__:
-                self._note("%s.__bootstrap(): thread started", self)
+                self._note("%s._bootstrap(): thread started", self)

             if _trace_hook:
-                self._note("%s.__bootstrap(): registering trace hook", self)
+                self._note("%s._bootstrap(): registering trace hook", self)
                 _sys.settrace(_trace_hook)
             if _profile_hook:
-                self._note("%s.__bootstrap(): registering profile hook", self)
+                self._note("%s._bootstrap(): registering profile hook", self)
                 _sys.setprofile(_profile_hook)

             try:
                 self.run()
             except SystemExit:
                 if __debug__:
-                    self._note("%s.__bootstrap(): raised SystemExit", self)
+                    self._note("%s._bootstrap(): raised SystemExit", self)
             except:
                 if __debug__:
-                    self._note("%s.__bootstrap(): unhandled exception", self)
+                    self._note("%s._bootstrap(): unhandled exception", self)
                 # If sys.stderr is no more (most likely from interpreter
                 # shutdown) use self._stderr. Otherwise still use sys (as in
                 # _sys) in case sys.stderr was redefined since the creation of
@@ -526,7 +526,7 @@ class Thread(_Verbose):
                 del exc_type, exc_value, exc_tb
             else:
                 if __debug__:
-                    self._note("%s.__bootstrap(): normal return", self)
+                    self._note("%s._bootstrap(): normal return", self)
         finally:
             with _active_limbo_lock:
                 self._stop()
@@ -580,7 +580,7 @@ class Thread(_Verbose):
     def join(self, timeout=None):
         if not self._initialized:
             raise RuntimeError("Thread.__init__() not called")
-        if not self._started:
+        if not self._started.isSet():
             raise RuntimeError("cannot join thread before it is started")
         if self is currentThread():
             raise RuntimeError("cannot join current thread")
@@ -621,7 +621,7 @@ class Thread(_Verbose):

     def isAlive(self):
         assert self._initialized, "Thread.__init__() not called"
-        return self._started and not self._stopped
+        return self._started.isSet() and not self._stopped

     def isDaemon(self):
         assert self._initialized, "Thread.__init__() not called"
@@ -630,7 +630,7 @@ class Thread(_Verbose):
     def setDaemon(self, daemonic):
         if not self._initialized:
             raise RuntimeError("Thread.__init__() not called")
-        if self._started:
+        if self._started.isSet():
             raise RuntimeError("cannot set daemon status of active thread");
         self._daemonic = daemonic

@@ -672,7 +672,7 @@ class _MainThread(Thread):

     def __init__(self):
         Thread.__init__(self, name="MainThread")
-        self._started = True
+        self._started.set()
         _active_limbo_lock.acquire()
         _active[_get_ident()] = self
         _active_limbo_lock.release()
@@ -718,7 +718,8 @@ class _DummyThread(Thread):
         # instance is immortal, that's bad, so release this resource.
         del self._block

-        self._started = True
+
+        self._started.set()
         _active_limbo_lock.acquire()
         _active[_get_ident()] = self
         _active_limbo_lock.release()
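The heart of the threading change is replacing the `_started` boolean (plus the Solaris sleep hack) with an Event that the child thread sets and start() waits on. A minimal standalone sketch of that handshake, not the stdlib code itself:

    import _thread      # the low-level module (named 'thread' at the time)
    import threading

    class MiniThread:
        def __init__(self, target):
            self._target = target
            self._started = threading.Event()   # was: self._started = False

        def start(self):
            _thread.start_new_thread(self._bootstrap, ())
            self._started.wait()      # was: _sleep(0.000001)  # Solaris hack

        def _bootstrap(self):
            self._started.set()       # the child announces it is really running
            self._target()

        def started(self):
            return self._started.is_set()   # was: return self._started

    t = MiniThread(lambda: print("child running"))
    t.start()             # returns only once the child has actually started
    print(t.started())    # True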
@@ -318,6 +318,40 @@ type_set_module(PyTypeObject *type, PyObject *value, void *context)
     return PyDict_SetItemString(type->tp_dict, "__module__", value);
 }

+static PyObject *
+type_abstractmethods(PyTypeObject *type, void *context)
+{
+    PyObject *mod = PyDict_GetItemString(type->tp_dict,
+                                         "__abstractmethods__");
+    if (!mod) {
+        PyErr_Format(PyExc_AttributeError, "__abstractmethods__");
+        return NULL;
+    }
+    Py_XINCREF(mod);
+    return mod;
+}
+
+static int
+type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context)
+{
+    /* __abstractmethods__ should only be set once on a type, in
+       abc.ABCMeta.__new__, so this function doesn't do anything
+       special to update subclasses.
+    */
+    int res = PyDict_SetItemString(type->tp_dict,
+                                   "__abstractmethods__", value);
+    if (res == 0) {
+        type_modified(type);
+        if (value && PyObject_IsTrue(value)) {
+            type->tp_flags |= Py_TPFLAGS_IS_ABSTRACT;
+        }
+        else {
+            type->tp_flags &= ~Py_TPFLAGS_IS_ABSTRACT;
+        }
+    }
+    return res;
+}
+
 static PyObject *
 type_get_bases(PyTypeObject *type, void *context)
 {
@@ -555,6 +589,8 @@ static PyGetSetDef type_getsets[] = {
     {"__name__", (getter)type_name, (setter)type_set_name, NULL},
     {"__bases__", (getter)type_get_bases, (setter)type_set_bases, NULL},
     {"__module__", (getter)type_module, (setter)type_set_module, NULL},
+    {"__abstractmethods__", (getter)type_abstractmethods,
+     (setter)type_set_abstractmethods, NULL},
     {"__dict__", (getter)type_dict, NULL, NULL},
     {"__doc__", (getter)type_get_doc, NULL, NULL},
     {0}
@@ -2638,6 +2674,52 @@ object_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
     }
     if (err < 0)
         return NULL;
+
+    if (type->tp_flags & Py_TPFLAGS_IS_ABSTRACT) {
+        static PyObject *comma = NULL;
+        PyObject *abstract_methods = NULL;
+        PyObject *builtins;
+        PyObject *sorted;
+        PyObject *sorted_methods = NULL;
+        PyObject *joined = NULL;
+
+        /* Compute ", ".join(sorted(type.__abstractmethods__))
+           into joined. */
+        abstract_methods = type_abstractmethods(type, NULL);
+        if (abstract_methods == NULL)
+            goto error;
+        builtins = PyEval_GetBuiltins();
+        if (builtins == NULL)
+            goto error;
+        sorted = PyDict_GetItemString(builtins, "sorted");
+        if (sorted == NULL)
+            goto error;
+        sorted_methods = PyObject_CallFunctionObjArgs(sorted,
+                                                      abstract_methods,
+                                                      NULL);
+        if (sorted_methods == NULL)
+            goto error;
+        if (comma == NULL) {
+            comma = PyUnicode_InternFromString(", ");
+            if (comma == NULL)
+                goto error;
+        }
+        joined = PyObject_CallMethod(comma, "join",
+                                     "O", sorted_methods);
+        if (joined == NULL)
+            goto error;
+
+        PyErr_Format(PyExc_TypeError,
+                     "Can't instantiate abstract class %s "
+                     "with abstract methods %U",
+                     type->tp_name,
+                     joined);
+    error:
+        Py_XDECREF(joined);
+        Py_XDECREF(sorted_methods);
+        Py_XDECREF(abstract_methods);
+        return NULL;
+    }
     return type->tp_alloc(type, 0);
 }
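Because __abstractmethods__ is now an ordinary getset on type whose setter toggles Py_TPFLAGS_IS_ABSTRACT, the flag can presumably be cleared from Python by reassigning the attribute. A sketch of the behaviour the C code above implies, not something the patch recommends doing:

    from abc import ABCMeta, abstractmethod

    class Incomplete(metaclass=ABCMeta):
        @abstractmethod
        def run(self): ...

    try:
        Incomplete()      # object.__new__ now sees Py_TPFLAGS_IS_ABSTRACT
    except TypeError as exc:
        print(exc)        # Can't instantiate abstract class Incomplete with abstract methods run

    Incomplete.__abstractmethods__ = frozenset()   # the setter above clears the flag
    print(Incomplete())   # instantiation now succeeds, at the caller's own risk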
@@ -3143,6 +3225,20 @@ object_reduce_ex(PyObject *self, PyObject *args)
     return _common_reduce(self, proto);
 }

+static PyObject *
+object_subclasshook(PyObject *cls, PyObject *args)
+{
+    Py_INCREF(Py_NotImplemented);
+    return Py_NotImplemented;
+}
+
+PyDoc_STRVAR(object_subclasshook_doc,
+"Abstract classes can override this to customize issubclass().\n"
+"\n"
+"This is invoked early on by abc.ABCMeta.__subclasscheck__().\n"
+"It should return True, False or NotImplemented.  If it returns\n"
+"NotImplemented, the normal algorithm is used.  Otherwise, it\n"
+"overrides the normal algorithm (and the outcome is cached).\n");
+
 /*
    from PEP 3101, this code implements:
@@ -3183,6 +3279,8 @@ static PyMethodDef object_methods[] = {
      PyDoc_STR("helper for pickle")},
     {"__reduce__", object_reduce, METH_VARARGS,
      PyDoc_STR("helper for pickle")},
+    {"__subclasshook__", object_subclasshook, METH_CLASS | METH_VARARGS,
+     object_subclasshook_doc},
     {"__format__", object_format, METH_VARARGS,
      PyDoc_STR("default object formatter")},
     {0}
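Finally, with a default __subclasshook__ on object (returning NotImplemented), an ABC can override the hook to customize issubclass(). A hedged example in the spirit of the docstring above:

    from abc import ABCMeta

    class Sized(metaclass=ABCMeta):
        @classmethod
        def __subclasshook__(cls, C):
            # Treat any class defining __len__ as a virtual subclass.
            if cls is Sized:
                return any("__len__" in B.__dict__ for B in C.__mro__)
            return NotImplemented

    print(object.__subclasshook__(int))   # NotImplemented (the new default)
    print(issubclass(list, Sized))        # True  -- list defines __len__
    print(issubclass(int, Sized))         # False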