This commit is contained in:
Larry Hastings 2013-09-09 21:12:21 +09:00
commit 8568f66daf
31 changed files with 529 additions and 364 deletions

View File

@@ -38,7 +38,8 @@ follows::
    ...     blue = 3
    ...

-..note: Nomenclature
+.. note:: Nomenclature

  - The class :class:`Color` is an *enumeration* (or *enum*)
  - The attributes :attr:`Color.red`, :attr:`Color.green`, etc., are
    *enumeration members* (or *enum members*).
@@ -474,7 +475,7 @@ Some rules:
4. %-style formatting: `%s` and `%r` call :class:`Enum`'s :meth:`__str__` and
   :meth:`__repr__` respectively; other codes (such as `%i` or `%h` for
   IntEnum) treat the enum member as its mixed-in type.
-5. :class:`str`.:meth:`__format__` (or :func:`format`) will use the mixed-in
+5. :meth:`str.__format__` (or :func:`format`) will use the mixed-in
   type's :meth:`__format__`. If the :class:`Enum`'s :func:`str` or
   :func:`repr` is desired use the `!s` or `!r` :class:`str` format codes.
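For illustration of rules 4 and 5 above (not part of the patch; ``Shape`` is a hypothetical mixed-in enum)::

    from enum import IntEnum

    class Shape(IntEnum):
        circle = 1

    print('%s' % Shape.circle)          # 'Shape.circle' -- %s goes through Enum.__str__
    print('%r' % Shape.circle)          # '<Shape.circle: 1>' -- %r goes through Enum.__repr__
    print('%i' % Shape.circle)          # '1' -- other codes use the mixed-in int
    print('{}'.format(Shape.circle))    # '1' -- str.__format__ defers to int.__format__
    print('{!s}'.format(Shape.circle))  # 'Shape.circle' -- !s forces the Enum str()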

View File

@@ -48,6 +48,7 @@ Iterator Arguments Results
==================== ============================ ================================================= =============================================================
:func:`accumulate`   p [,func]                    p0, p0+p1, p0+p1+p2, ...                          ``accumulate([1,2,3,4,5]) --> 1 3 6 10 15``
:func:`chain`        p, q, ...                    p0, p1, ... plast, q0, q1, ...                    ``chain('ABC', 'DEF') --> A B C D E F``
+chain.from_iterable  iterable                     p0, p1, ... plast, q0, q1, ...                    ``chain.from_iterable(['ABC', 'DEF']) --> A B C D E F``
:func:`compress`     data, selectors              (d[0] if s[0]), (d[1] if s[1]), ...               ``compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F``
:func:`dropwhile`    pred, seq                    seq[n], seq[n+1], starting when pred fails        ``dropwhile(lambda x: x<5, [1,4,6,4,1]) --> 6 4 1``
:func:`filterfalse`  pred, seq                    elements of seq where pred(elem) is False         ``filterfalse(lambda x: x%2, range(10)) --> 0 2 4 6 8``
@@ -156,9 +157,8 @@ loops that truncate the stream.
.. classmethod:: chain.from_iterable(iterable)

   Alternate constructor for :func:`chain`. Gets chained inputs from a
-  single iterable argument that is evaluated lazily. Equivalent to::
+  single iterable argument that is evaluated lazily. Roughly equivalent to::

+      @classmethod
       def from_iterable(iterables):
           # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
           for it in iterables:
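(The hunk above is cut off by the diff context; the loop body simply yields each element of ``it``.) A quick usage check::

    from itertools import chain
    # same elements as chain('ABC', 'DEF'), but the inputs come from one iterable
    print(list(chain.from_iterable(['ABC', 'DEF'])))   # ['A', 'B', 'C', 'D', 'E', 'F']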

View File

@@ -757,8 +757,6 @@ as internal buffering of data.
   As of Python 3.3, this is equivalent to ``os.pathconf(fd, name)``.

-  Availability: Unix.

.. function:: fstat(fd)

View File

@@ -247,11 +247,13 @@ functions:
    import cProfile, pstats, io
    pr = cProfile.Profile()
    pr.enable()
-   ... do something ...
+   # ... do something ...
    pr.disable()
    s = io.StringIO()
-   ps = pstats.Stats(pr, stream=s)
-   ps.print_results()
+   sortby = 'cumulative'
+   ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+   ps.print_stats()
+   print(s.getvalue())
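A couple of handy variations on the corrected snippet (assuming the ``ps`` object created above)::

    ps.print_stats(10)       # restrict the report to the 10 most expensive entries
    ps.sort_stats('time')    # other accepted sort keys include 'time' and 'calls'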
.. method:: enable() .. method:: enable()

View File

@@ -263,12 +263,15 @@ The :mod:`test.support` module defines the following functions:
   Used when tests are executed by :mod:`test.regrtest`.

-.. function:: findfile(filename)
+.. function:: findfile(filename, subdir=None)

   Return the path to the file named *filename*. If no match is found
   *filename* is returned. This does not equal a failure since it could be the
   path to the file.

+  Setting *subdir* indicates a relative path to use to find the file
+  rather than looking directly in the path directories.
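A short usage sketch (the file names here are only illustrative)::

    from test import support
    # plain lookup on sys.path / in the test directory
    path = support.findfile('tokenize_tests.txt')
    # lookup relative to a subdirectory of the test package
    wav = support.findfile('pluck-pcm8.wav', subdir='audiodata')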
.. function:: run_unittest(\*classes) .. function:: run_unittest(\*classes)

View File

@@ -1674,8 +1674,7 @@ Loading and running tests
      A list containing 2-tuples of :class:`TestCase` instances and strings
      holding formatted tracebacks. Each tuple represents a test where a failure
-     was explicitly signalled using the :meth:`TestCase.fail\*` or
-     :meth:`TestCase.assert\*` methods.
+     was explicitly signalled using the :meth:`TestCase.assert\*` methods.

   .. attribute:: skipped
@@ -1772,7 +1771,7 @@ Loading and running tests
   .. method:: addError(test, err)

-     Called when the test case *test* raises an unexpected exception *err* is a
+     Called when the test case *test* raises an unexpected exception. *err* is a
      tuple of the form returned by :func:`sys.exc_info`: ``(type, value,
      traceback)``.
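For orientation, a minimal sketch of hooking ``addError`` in a custom result class (the class name is made up)::

    import unittest

    class NoisyResult(unittest.TestResult):
        def addError(self, test, err):
            super().addError(test, err)     # stores (test, formatted traceback) in self.errors
            exc_type, exc_value, tb = err   # the sys.exc_info()-style tuple described above
            print('error in', test, ':', exc_type.__name__)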

View File

@@ -511,9 +511,9 @@ conflict.
.. envvar:: PYTHONDONTWRITEBYTECODE

-   If this is set, Python won't try to write ``.pyc`` or ``.pyo`` files on the
-   import of source modules. This is equivalent to specifying the :option:`-B`
-   option.
+   If this is set to a non-empty string, Python won't try to write ``.pyc`` or
+   ``.pyo`` files on the import of source modules. This is equivalent to
+   specifying the :option:`-B` option.

.. envvar:: PYTHONHASHSEED
@@ -582,11 +582,11 @@ conflict.
.. envvar:: PYTHONFAULTHANDLER

-   If this environment variable is set, :func:`faulthandler.enable` is called
-   at startup: install a handler for :const:`SIGSEGV`, :const:`SIGFPE`,
-   :const:`SIGABRT`, :const:`SIGBUS` and :const:`SIGILL` signals to dump the
-   Python traceback. This is equivalent to :option:`-X` ``faulthandler``
-   option.
+   If this environment variable is set to a non-empty string,
+   :func:`faulthandler.enable` is called at startup: install a handler for
+   :const:`SIGSEGV`, :const:`SIGFPE`, :const:`SIGABRT`, :const:`SIGBUS` and
+   :const:`SIGILL` signals to dump the Python traceback. This is equivalent to
+   :option:`-X` ``faulthandler`` option.

.. versionadded:: 3.3
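A quick way to observe the new behaviour, mirroring the test added elsewhere in this commit::

    import os, subprocess, sys

    code = "import faulthandler; print(faulthandler.is_enabled())"
    env = dict(os.environ, PYTHONFAULTHANDLER='')    # empty string: stays disabled
    print(subprocess.check_output([sys.executable, '-c', code], env=env))   # b'False\n'
    env['PYTHONFAULTHANDLER'] = '1'                  # non-empty: enabled at startup
    print(subprocess.check_output([sys.executable, '-c', code], env=env))   # b'True\n'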

View File

@ -118,6 +118,32 @@ typedef struct _ts {
int trash_delete_nesting; int trash_delete_nesting;
PyObject *trash_delete_later; PyObject *trash_delete_later;
/* Called when a thread state is deleted normally, but not when it
* is destroyed after fork().
* Pain: to prevent rare but fatal shutdown errors (issue 18808),
* Thread.join() must wait for the join'ed thread's tstate to be unlinked
* from the tstate chain. That happens at the end of a thread's life,
* in pystate.c.
* The obvious way doesn't quite work: create a lock which the tstate
* unlinking code releases, and have Thread.join() wait to acquire that
* lock. The problem is that we _are_ at the end of the thread's life:
* if the thread holds the last reference to the lock, decref'ing the
* lock will delete the lock, and that may trigger arbitrary Python code
* if there's a weakref, with a callback, to the lock. But by this time
* _PyThreadState_Current is already NULL, so only the simplest of C code
* can be allowed to run (in particular it must not be possible to
* release the GIL).
* So instead of holding the lock directly, the tstate holds a weakref to
* the lock: that's the value of on_delete_data below. Decref'ing a
* weakref is harmless.
* on_delete points to _threadmodule.c's static release_sentinel() function.
* After the tstate is unlinked, release_sentinel is called with the
* weakref-to-lock (on_delete_data) argument, and release_sentinel releases
* the indirectly held lock.
*/
void (*on_delete)(void *);
void *on_delete_data;
/* XXX signal handlers should also be here */ /* XXX signal handlers should also be here */
} PyThreadState; } PyThreadState;
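The Python-level effect of this machinery is visible through the private ``_tstate_lock`` attribute that threading.Thread gains later in this commit; a rough, timing-based sketch::

    import threading, time

    t = threading.Thread(target=time.sleep, args=(0.2,))
    print(t._tstate_lock)            # None until the thread is started
    t.start()
    lock = t._tstate_lock            # acquired by the new thread itself
    print(lock.acquire(timeout=0))   # False while the thread is still running
    t.join()                         # returns once C code has released the lock
    print(t._tstate_lock)            # None again: join() observed the release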

View File

@ -105,7 +105,6 @@ PyAPI_FUNC(PyObject *) PySet_Pop(PyObject *set);
PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable); PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable);
PyAPI_FUNC(int) PySet_ClearFreeList(void); PyAPI_FUNC(int) PySet_ClearFreeList(void);
PyAPI_FUNC(void) _PySet_DebugMallocStats(FILE *out);
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -81,6 +81,10 @@ def stack_size(size=None):
raise error("setting thread stack size not supported") raise error("setting thread stack size not supported")
return 0 return 0
def _set_sentinel():
"""Dummy implementation of _thread._set_sentinel()."""
return LockType()
class LockType(object): class LockType(object):
"""Class implementing dummy implementation of _thread.LockType. """Class implementing dummy implementation of _thread.LockType.

View File

@@ -21,7 +21,7 @@ the General Decimal Arithmetic Specification:
and IEEE standard 854-1987:

-    www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
+    http://en.wikipedia.org/wiki/IEEE_854-1987

Decimal floating point has finite precision with arbitrarily large bounds.

View File

@@ -878,13 +878,21 @@ else:
    import selectors

+   # poll/select have the advantage of not requiring any extra file
+   # descriptor, contrarily to epoll/kqueue (also, they require a single
+   # syscall).
+   if hasattr(selectors, 'PollSelector'):
+       _WaitSelector = selectors.PollSelector
+   else:
+       _WaitSelector = selectors.SelectSelector
+
    def wait(object_list, timeout=None):
        '''
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        '''
-       with selectors.DefaultSelector() as selector:
+       with _WaitSelector() as selector:
            for obj in object_list:
                selector.register(obj, selectors.EVENT_READ)
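Callers are unaffected by the selector swap; for reference, a small usage example of ``wait()``::

    from multiprocessing import Pipe
    from multiprocessing.connection import wait

    r, w = Pipe(duplex=False)
    w.send('hello')
    ready = wait([r], timeout=1.0)   # -> [r], data is already buffered
    print(ready[0].recv())           # 'hello'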

View File

@@ -860,24 +860,31 @@ if hasattr(os, "umask"):
    finally:
        os.umask(oldmask)

-# TEST_HOME refers to the top level directory of the "test" package
+# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
-TEST_HOME = os.path.dirname(os.path.abspath(__file__))
+TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
+TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
+
+# TEST_DATA_DIR is used as a target download location for remote resources
+TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")

-def findfile(file, here=TEST_HOME, subdir=None):
+def findfile(filename, subdir=None):
    """Try to find a file on sys.path or in the test directory. If it is not
    found the argument passed to the function is returned (this does not
-    necessarily signal failure; could still be the legitimate path)."""
-    if os.path.isabs(file):
-        return file
+    necessarily signal failure; could still be the legitimate path).
+
+    Setting *subdir* indicates a relative path to use to find the file
+    rather than looking directly in the path directories.
+    """
+    if os.path.isabs(filename):
+        return filename
    if subdir is not None:
-        file = os.path.join(subdir, file)
-    path = sys.path
-    path = [os.path.dirname(here)] + path
+        filename = os.path.join(subdir, filename)
+    path = [TEST_HOME_DIR] + sys.path
    for dn in path:
-        fn = os.path.join(dn, file)
+        fn = os.path.join(dn, filename)
        if os.path.exists(fn): return fn
-    return file
+    return filename

def create_empty_file(filename):
    """Create an empty file. If the file already exists, truncate it."""
@@ -914,7 +921,7 @@ def open_urlresource(url, *args, **kw):
    filename = urllib.parse.urlparse(url)[2].split('/')[-1]  # '/': it's URL!

-    fn = os.path.join(os.path.dirname(__file__), "data", filename)
+    fn = os.path.join(TEST_DATA_DIR, filename)

def check_valid_file(fn):
    f = open(fn, *args, **kw)

View File

@@ -265,17 +265,33 @@ faulthandler._sigsegv()
        # By default, the module should be disabled
        code = "import faulthandler; print(faulthandler.is_enabled())"
        args = (sys.executable, '-E', '-c', code)
-        # use subprocess module directly because test.script_helper adds
-        # "-X faulthandler" to the command line
-        stdout = subprocess.check_output(args)
-        self.assertEqual(stdout.rstrip(), b"False")
+        # don't use assert_python_ok() because it always enable faulthandler
+        output = subprocess.check_output(args)
+        self.assertEqual(output.rstrip(), b"False")

    def test_sys_xoptions(self):
        # Test python -X faulthandler
        code = "import faulthandler; print(faulthandler.is_enabled())"
-        rc, stdout, stderr = assert_python_ok("-X", "faulthandler", "-c", code)
-        stdout = (stdout + stderr).strip()
-        self.assertEqual(stdout, b"True")
+        args = (sys.executable, "-E", "-X", "faulthandler", "-c", code)
+        # don't use assert_python_ok() because it always enable faulthandler
+        output = subprocess.check_output(args)
+        self.assertEqual(output.rstrip(), b"True")
def test_env_var(self):
# empty env var
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, "-c", code)
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = ''
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"False")
# non-empty env var
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = '1'
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"True")
def check_dump_traceback(self, filename): def check_dump_traceback(self, filename):
""" """

View File

@ -34,6 +34,10 @@ try:
import resource import resource
except ImportError: except ImportError:
resource = None resource = None
try:
import fcntl
except ImportError:
fcntl = None
from test.script_helper import assert_python_ok from test.script_helper import assert_python_ok
@ -2300,19 +2304,38 @@ class CPUCountTests(unittest.TestCase):
class FDInheritanceTests(unittest.TestCase):
-    def test_get_inheritable(self):
+    def test_get_set_inheritable(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
-        for inheritable in (False, True):
-            os.set_inheritable(fd, inheritable)
-            self.assertEqual(os.get_inheritable(fd), inheritable)
-
-    def test_set_inheritable(self):
-        fd = os.open(__file__, os.O_RDONLY)
-        self.addCleanup(os.close, fd)
+        self.assertEqual(os.get_inheritable(fd), False)
+
        os.set_inheritable(fd, True)
        self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
os.set_inheritable(fd, True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_open(self): def test_open(self):
fd = os.open(__file__, os.O_RDONLY) fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd) self.addCleanup(os.close, fd)
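The relationship these tests exercise, in a few lines (Unix-only because of fcntl)::

    import fcntl, os

    fd = os.open(__file__, os.O_RDONLY)
    print(os.get_inheritable(fd))     # False: descriptors are non-inheritable by default
    os.set_inheritable(fd, True)      # clears FD_CLOEXEC under the hood
    print(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)   # 0
    os.close(fd)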

View File

@ -3,6 +3,7 @@ Tests of regrtest.py.
""" """
import argparse import argparse
import faulthandler
import getopt import getopt
import os.path import os.path
import unittest import unittest
@ -25,6 +26,8 @@ class ParseArgsTestCase(unittest.TestCase):
regrtest._parse_args([opt]) regrtest._parse_args([opt])
self.assertIn('Run Python regression tests.', out.getvalue()) self.assertIn('Run Python regression tests.', out.getvalue())
@unittest.skipUnless(hasattr(faulthandler, 'dump_traceback_later'),
"faulthandler.dump_traceback_later() required")
def test_timeout(self): def test_timeout(self):
ns = regrtest._parse_args(['--timeout', '4.2']) ns = regrtest._parse_args(['--timeout', '4.2'])
self.assertEqual(ns.timeout, 4.2) self.assertEqual(ns.timeout, 4.2)

View File

@ -301,6 +301,7 @@ class BaseSelectorTestCase(unittest.TestCase):
class ScalableSelectorMixIn: class ScalableSelectorMixIn:
# see issue #18963 for why it's skipped on older OS X versions
@support.requires_mac_ver(10, 5) @support.requires_mac_ver(10, 5)
@unittest.skipUnless(resource, "Test needs resource module") @unittest.skipUnless(resource, "Test needs resource module")
def test_above_fd_setsize(self): def test_above_fd_setsize(self):
@@ -313,7 +314,7 @@ class ScalableSelectorMixIn:
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = hard
-        except OSError:
+        except (OSError, ValueError):
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
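The wider except clause matters because ``resource.setrlimit()`` does not always fail with ``OSError``; a hedged sketch of the pattern the test now relies on::

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        num_fds = hard
    except (OSError, ValueError):
        # some platforms are reported to reject the advertised hard limit
        # with ValueError rather than OSError; fall back to the soft limit
        num_fds = soft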

View File

@ -5,6 +5,7 @@ executing have not been removed.
""" """
import unittest import unittest
import test.support
from test.support import run_unittest, TESTFN, EnvironmentVarGuard from test.support import run_unittest, TESTFN, EnvironmentVarGuard
from test.support import captured_stderr from test.support import captured_stderr
import builtins import builtins
@ -373,9 +374,10 @@ class ImportSideEffectTests(unittest.TestCase):
self.assertTrue(hasattr(builtins, "exit")) self.assertTrue(hasattr(builtins, "exit"))
    def test_setting_copyright(self):
-        # 'copyright' and 'credits' should be in builtins
+        # 'copyright', 'credits', and 'license' should be in builtins
        self.assertTrue(hasattr(builtins, "copyright"))
        self.assertTrue(hasattr(builtins, "credits"))
+        self.assertTrue(hasattr(builtins, "license"))
def test_setting_help(self): def test_setting_help(self):
# 'help' should be set in builtins # 'help' should be set in builtins
@ -402,5 +404,27 @@ class ImportSideEffectTests(unittest.TestCase):
self.fail("sitecustomize not imported automatically") self.fail("sitecustomize not imported automatically")
class LicenseURL(unittest.TestCase):
"""Test accessibility of the license."""
@unittest.skipUnless(str(license).startswith('See http://'),
'license is available as a file')
def test_license_page(self):
"""urlopen should return the license page"""
pat = r'^See (http://www\.python\.org/download/releases/[^/]+/license/)$'
mo = re.search(pat, str(license))
self.assertIsNotNone(mo, msg='can\'t find appropriate url in license')
if mo is not None:
url = mo.group(1)
with test.support.transient_internet(url):
import urllib.request, urllib.error
try:
with urllib.request.urlopen(url) as data:
code = data.getcode()
except urllib.error.HTTPError as e:
code = e.code
self.assertEqual(code, 200, msg=url)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()

View File

@ -26,6 +26,10 @@ try:
import multiprocessing import multiprocessing
except ImportError: except ImportError:
multiprocessing = False multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
@ -4804,6 +4808,33 @@ class InheritanceTest(unittest.TestCase):
sock.set_inheritable(False) sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False) self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
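Sockets get the same default as raw descriptors (compare with the os-level tests earlier in this commit)::

    import socket

    sock = socket.socket()
    print(sock.get_inheritable())   # False: sockets are created non-inheritable
    sock.set_inheritable(True)      # on POSIX this clears FD_CLOEXEC on sock.fileno()
    print(sock.get_inheritable())   # True
    sock.close()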
@unittest.skipUnless(hasattr(socket, "socketpair"), @unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()") "need socket.socketpair()")
def test_socketpair(self): def test_socketpair(self):

View File

@ -15,6 +15,14 @@ support.import_fresh_module('tkinter')
from tkinter import Tcl from tkinter import Tcl
from _tkinter import TclError from _tkinter import TclError
tcl_version = _tkinter.TCL_VERSION.split('.')
try:
for i in range(len(tcl_version)):
tcl_version[i] = int(tcl_version[i])
except ValueError:
pass
tcl_version = tuple(tcl_version)
class TkinterTest(unittest.TestCase): class TkinterTest(unittest.TestCase):
@ -200,6 +208,9 @@ class TclTest(unittest.TestCase):
(('a', 3.4), ('a', 3.4)), (('a', 3.4), ('a', 3.4)),
((), ()), ((), ()),
(call('list', 1, '2', (3.4,)), (1, '2', (3.4,))), (call('list', 1, '2', (3.4,)), (1, '2', (3.4,))),
]
if tcl_version >= (8, 5):
testcases += [
(call('dict', 'create', 1, '\u20ac', b'\xe2\x82\xac', (3.4,)), (call('dict', 'create', 1, '\u20ac', b'\xe2\x82\xac', (3.4,)),
(1, '\u20ac', '\u20ac', (3.4,))), (1, '\u20ac', '\u20ac', (3.4,))),
] ]
@ -234,6 +245,9 @@ class TclTest(unittest.TestCase):
(('a', (2, 3.4)), ('a', (2, 3.4))), (('a', (2, 3.4)), ('a', (2, 3.4))),
((), ()), ((), ()),
(call('list', 1, '2', (3.4,)), (1, '2', (3.4,))), (call('list', 1, '2', (3.4,)), (1, '2', (3.4,))),
]
if tcl_version >= (8, 5):
testcases += [
(call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)), (call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)),
(12, '\u20ac', '\u20ac', (3.4,))), (12, '\u20ac', '\u20ac', (3.4,))),
] ]

View File

@ -109,7 +109,7 @@ class ThreadTests(BaseTestCase):
if verbose: if verbose:
print('waiting for all tasks to complete') print('waiting for all tasks to complete')
        for t in threads:
-            t.join(NUMTASKS)
+            t.join()
            self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0) self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None) self.assertFalse(t.ident is None)
@ -539,6 +539,40 @@ class ThreadTests(BaseTestCase):
self.assertEqual(err, b"") self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n") self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
class ThreadJoinOnShutdown(BaseTestCase): class ThreadJoinOnShutdown(BaseTestCase):
@ -613,144 +647,8 @@ class ThreadJoinOnShutdown(BaseTestCase):
""" """
self._run_and_join(script) self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug") @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self): def test_4_daemon_threads(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_6_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown # Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in # by manipulating internal structures that are being disposed of in
# the main thread. # the main thread.
@ -867,6 +765,38 @@ class SubinterpThreadingTests(BaseTestCase):
# The thread was joined properly. # The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x") self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = _testcapi.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_daemon_threads_fatal_error(self): def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1: subinterp_code = r"""if 1:
import os import os

View File

@ -33,6 +33,7 @@ __all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
# Rename some stuff so "from threading import *" is safe # Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread _start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock _allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident get_ident = _thread.get_ident
ThreadError = _thread.error ThreadError = _thread.error
try: try:
@ -516,8 +517,6 @@ def _newname(template="Thread-%d"):
_active_limbo_lock = _allocate_lock() _active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object _active = {} # maps thread id to Thread object
_limbo = {} _limbo = {}
# For debug and leak testing
_dangling = WeakSet() _dangling = WeakSet()
# Main class for threads # Main class for threads
@ -548,28 +547,33 @@ class Thread:
        else:
            self._daemonic = current_thread().daemon
        self._ident = None
+        self._tstate_lock = None
        self._started = Event()
-        self._stopped = False
-        self._block = Condition(Lock())
+        self._is_stopped = False
        self._initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self._stderr = _sys.stderr
+        # For debugging and _after_fork()
        _dangling.add(self)

-    def _reset_internal_locks(self):
+    def _reset_internal_locks(self, is_alive):
        # private! Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
-        if hasattr(self, '_block'):  # DummyThread deletes _block
-            self._block.__init__()
        self._started._reset_internal_locks()
+        if is_alive:
+            self._set_tstate_lock()
+        else:
+            # The thread isn't alive after fork: it doesn't have a tstate
+            # anymore.
+            self._tstate_lock = None
def __repr__(self): def __repr__(self):
assert self._initialized, "Thread.__init__() was not called" assert self._initialized, "Thread.__init__() was not called"
status = "initial" status = "initial"
if self._started.is_set(): if self._started.is_set():
status = "started" status = "started"
-        if self._stopped:
+        if self._is_stopped:
status = "stopped" status = "stopped"
if self._daemonic: if self._daemonic:
status += " daemon" status += " daemon"
@ -625,9 +629,18 @@ class Thread:
def _set_ident(self): def _set_ident(self):
self._ident = get_ident() self._ident = get_ident()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
def _bootstrap_inner(self): def _bootstrap_inner(self):
try: try:
self._set_ident() self._set_ident()
self._set_tstate_lock()
self._started.set() self._started.set()
with _active_limbo_lock: with _active_limbo_lock:
_active[self._ident] = self _active[self._ident] = self
@ -682,7 +695,6 @@ class Thread:
pass pass
finally: finally:
with _active_limbo_lock: with _active_limbo_lock:
self._stop()
try: try:
# We don't call self._delete() because it also # We don't call self._delete() because it also
# grabs _active_limbo_lock. # grabs _active_limbo_lock.
@ -691,10 +703,8 @@ class Thread:
pass pass
    def _stop(self):
-        self._block.acquire()
-        self._stopped = True
-        self._block.notify_all()
-        self._block.release()
+        self._is_stopped = True
+        self._tstate_lock = None
def _delete(self): def _delete(self):
"Remove current thread from the dict of currently running threads." "Remove current thread from the dict of currently running threads."
@ -738,21 +748,24 @@ class Thread:
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")
-        self._block.acquire()
-        try:
-            if timeout is None:
-                while not self._stopped:
-                    self._block.wait()
-            else:
-                deadline = _time() + timeout
-                while not self._stopped:
-                    delay = deadline - _time()
-                    if delay <= 0:
-                        break
-                    self._block.wait(delay)
-        finally:
-            self._block.release()
+        if timeout is None:
+            self._wait_for_tstate_lock()
+        else:
+            self._wait_for_tstate_lock(timeout=timeout)
+
+    def _wait_for_tstate_lock(self, block=True, timeout=-1):
+        # Issue #18808: wait for the thread state to be gone.
+        # At the end of the thread's life, after all knowledge of the thread
+        # is removed from C data structures, C code releases our _tstate_lock.
+        # This method passes its arguments to _tstate_lock.acquire().
+        # If the lock is acquired, the C code is done, and self._stop() is
+        # called. That sets ._is_stopped to True, and ._tstate_lock to None.
+        lock = self._tstate_lock
+        if lock is None:  # already determined that the C code is done
+            assert self._is_stopped
+        elif lock.acquire(block, timeout):
+            lock.release()
+            self._stop()
@property @property
def name(self): def name(self):
@ -771,7 +784,10 @@ class Thread:
def is_alive(self): def is_alive(self):
assert self._initialized, "Thread.__init__() not called" assert self._initialized, "Thread.__init__() not called"
-        return self._started.is_set() and not self._stopped
+        if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
isAlive = is_alive isAlive = is_alive
@ -835,6 +851,7 @@ class _MainThread(Thread):
def __init__(self): def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False) Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set() self._started.set()
self._set_ident() self._set_ident()
with _active_limbo_lock: with _active_limbo_lock:
@ -854,11 +871,6 @@ class _DummyThread(Thread):
def __init__(self): def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True) Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
# Thread._block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._block
self._started.set() self._started.set()
self._set_ident() self._set_ident()
with _active_limbo_lock: with _active_limbo_lock:
@ -904,6 +916,14 @@ from _thread import stack_size
_main_thread = _MainThread() _main_thread = _MainThread()
def _shutdown(): def _shutdown():
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
assert _main_thread._tstate_lock is not None
assert _main_thread._tstate_lock.locked()
_main_thread._tstate_lock.release()
_main_thread._stop() _main_thread._stop()
t = _pickSomeNonDaemonThread() t = _pickSomeNonDaemonThread()
while t: while t:
@ -949,18 +969,23 @@ def _after_fork():
current = current_thread() current = current_thread()
_main_thread = current _main_thread = current
with _active_limbo_lock: with _active_limbo_lock:
for thread in _enumerate(): # Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an # Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them. # invalid state, so we reinitialize them.
thread._reset_internal_locks()
if thread is current: if thread is current:
# There is only one active thread. We reset the ident to # There is only one active thread. We reset the ident to
# its new value since it can have changed. # its new value since it can have changed.
thread._reset_internal_locks(True)
ident = get_ident() ident = get_ident()
thread._ident = ident thread._ident = ident
new_active[ident] = thread new_active[ident] = thread
else: else:
# All the others are already stopped. # All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop() thread._stop()
_limbo.clear() _limbo.clear()

View File

@ -11,7 +11,7 @@ Simple usage:
import unittest import unittest
-    class IntegerArithmenticTestCase(unittest.TestCase):
+    class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*' def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3) self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1) self.assertEqual(0 + 1, 1)

View File

@ -2,9 +2,6 @@
Python News Python News
+++++++++++ +++++++++++
What's New in Python 3.4.0 Alpha 3?
===================================
Projected Release date: 2013-09-29 Projected Release date: 2013-09-29
Core and Builtins Core and Builtins
@ -13,6 +10,17 @@ Core and Builtins
Library Library
------- -------
- The :envvar:`PYTHONFAULTHANDLER` environment variable now only enables the
faulthandler module if the variable is non-empty. Same behaviour than other
variables like :envvar:`PYTHONDONTWRITEBYTECODE`.
Tests
-----
- Issue #18952: Fix regression in support data downloads introduced when
test.support was converted to a package. Regression noticed by Zachary
Ware.
What's New in Python 3.4.0 Alpha 2? What's New in Python 3.4.0 Alpha 2?
=================================== ===================================
@ -68,6 +76,10 @@ Core and Builtins
Library Library
------- -------
- Issue #18808: Thread.join() now waits for the underlying thread state to
be destroyed before returning. This prevents unpredictable aborts in
Py_EndInterpreter() when some non-daemon threads are still running.
- Issue #18458: Prevent crashes with newer versions of libedit. Its readline - Issue #18458: Prevent crashes with newer versions of libedit. Its readline
emulation has changed from 0-based indexing to 1-based like gnu readline. emulation has changed from 0-based indexing to 1-based like gnu readline.
@ -75,7 +87,7 @@ Library
readline activation code in ``site.py``. readline activation code in ``site.py``.
- Issue #18672: Fixed format specifiers for Py_ssize_t in debugging output in
-  the _sre moduel.
+  the _sre module.
- Issue #18830: inspect.getclasstree() no more produces duplicated entries even - Issue #18830: inspect.getclasstree() no more produces duplicated entries even
when input list contains duplicates. when input list contains duplicates.

View File

@ -99,13 +99,15 @@ multiprocessing_send(PyObject *self, PyObject *args)
{ {
HANDLE handle; HANDLE handle;
Py_buffer buf; Py_buffer buf;
-    int ret;
+    int ret, length;

    if (!PyArg_ParseTuple(args, F_HANDLE "y*:send" , &handle, &buf))
        return NULL;

+    length = (int)Py_MIN(buf.len, INT_MAX);
    Py_BEGIN_ALLOW_THREADS
-    ret = send((SOCKET) handle, buf.buf, buf.len, 0);
+    ret = send((SOCKET) handle, buf.buf, length, 0);
Py_END_ALLOW_THREADS Py_END_ALLOW_THREADS
PyBuffer_Release(&buf); PyBuffer_Release(&buf);

View File

@ -1172,6 +1172,66 @@ yet finished.\n\
This function is meant for internal and specialized purposes only.\n\ This function is meant for internal and specialized purposes only.\n\
In most applications `threading.enumerate()` should be used instead."); In most applications `threading.enumerate()` should be used instead.");
static void
release_sentinel(void *wr)
{
/* Tricky: this function is called when the current thread state
is being deleted. Therefore, only simple C code can safely
execute here. */
PyObject *obj = PyWeakref_GET_OBJECT(wr);
lockobject *lock;
if (obj != Py_None) {
assert(Py_TYPE(obj) == &Locktype);
lock = (lockobject *) obj;
if (lock->locked) {
PyThread_release_lock(lock->lock_lock);
lock->locked = 0;
}
}
/* Deallocating a weakref with a NULL callback only calls
PyObject_GC_Del(), which can't call any Python code. */
Py_DECREF(wr);
}
static PyObject *
thread__set_sentinel(PyObject *self)
{
PyObject *wr;
PyThreadState *tstate = PyThreadState_Get();
lockobject *lock;
if (tstate->on_delete_data != NULL) {
/* We must support the re-creation of the lock from a
fork()ed child. */
assert(tstate->on_delete == &release_sentinel);
wr = (PyObject *) tstate->on_delete_data;
tstate->on_delete = NULL;
tstate->on_delete_data = NULL;
Py_DECREF(wr);
}
lock = newlockobject();
if (lock == NULL)
return NULL;
/* The lock is owned by whoever called _set_sentinel(), but the weakref
hangs to the thread state. */
wr = PyWeakref_NewRef((PyObject *) lock, NULL);
if (wr == NULL) {
Py_DECREF(lock);
return NULL;
}
tstate->on_delete_data = (void *) wr;
tstate->on_delete = &release_sentinel;
return (PyObject *) lock;
}
PyDoc_STRVAR(_set_sentinel_doc,
"_set_sentinel() -> lock\n\
\n\
Set a sentinel lock that will be released when the current thread\n\
state is finalized (after it is untied from the interpreter).\n\
\n\
This is a private API for the threading module.");
static PyObject * static PyObject *
thread_stack_size(PyObject *self, PyObject *args) thread_stack_size(PyObject *self, PyObject *args)
{ {
@ -1247,6 +1307,8 @@ static PyMethodDef thread_methods[] = {
METH_NOARGS, _count_doc}, METH_NOARGS, _count_doc},
{"stack_size", (PyCFunction)thread_stack_size, {"stack_size", (PyCFunction)thread_stack_size,
METH_VARARGS, stack_size_doc}, METH_VARARGS, stack_size_doc},
{"_set_sentinel", (PyCFunction)thread__set_sentinel,
METH_NOARGS, _set_sentinel_doc},
{NULL, NULL} /* sentinel */ {NULL, NULL} /* sentinel */
}; };

View File

@ -1048,8 +1048,11 @@ faulthandler_env_options(void)
{ {
PyObject *xoptions, *key, *module, *res; PyObject *xoptions, *key, *module, *res;
_Py_IDENTIFIER(enable); _Py_IDENTIFIER(enable);
char *p;
-    if (!Py_GETENV("PYTHONFAULTHANDLER")) {
+    if (!((p = Py_GETENV("PYTHONFAULTHANDLER")) && *p != '\0')) {
/* PYTHONFAULTHANDLER environment variable is missing
or an empty string */
int has_key; int has_key;
xoptions = PySys_GetXOptions(); xoptions = PySys_GetXOptions();

View File

@ -4,7 +4,7 @@
/* Itertools module written and maintained /* Itertools module written and maintained
by Raymond D. Hettinger <python@rcn.com> by Raymond D. Hettinger <python@rcn.com>
-   Copyright (c) 2003 Python Software Foundation.
+   Copyright (c) 2003-2013 Python Software Foundation.
All rights reserved. All rights reserved.
*/ */
@ -4456,6 +4456,7 @@ repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times\n\
Iterators terminating on the shortest input sequence:\n\ Iterators terminating on the shortest input sequence:\n\
accumulate(p[, func]) --> p0, p0+p1, p0+p1+p2\n\ accumulate(p[, func]) --> p0, p0+p1, p0+p1+p2\n\
chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... \n\ chain(p, q, ...) --> p0, p1, ... plast, q0, q1, ... \n\
chain.from_iterable([p, q, ...]) --> p0, p1, ... plast, q0, q1, ... \n\
compress(data, selectors) --> (d[0] if s[0]), (d[1] if s[1]), ...\n\ compress(data, selectors) --> (d[0] if s[0]), (d[1] if s[1]), ...\n\
dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails\n\ dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails\n\
groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v)\n\ groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v)\n\

View File

@ -1955,7 +1955,6 @@ _PyObject_DebugTypeStats(FILE *out)
_PyFrame_DebugMallocStats(out); _PyFrame_DebugMallocStats(out);
_PyList_DebugMallocStats(out); _PyList_DebugMallocStats(out);
_PyMethod_DebugMallocStats(out); _PyMethod_DebugMallocStats(out);
_PySet_DebugMallocStats(out);
_PyTuple_DebugMallocStats(out); _PyTuple_DebugMallocStats(out);
} }

View File

@ -1,51 +1,12 @@
/* set object implementation /* set object implementation
Written and maintained by Raymond D. Hettinger <python@rcn.com> Written and maintained by Raymond D. Hettinger <python@rcn.com>
Derived from Lib/sets.py and Objects/dictobject.c. Derived from Lib/sets.py and Objects/dictobject.c.
Copyright (c) 2003-2013 Python Software Foundation. Copyright (c) 2003-2013 Python Software Foundation.
All rights reserved. All rights reserved.
*/
#include "Python.h"
#include "structmember.h"
#include "stringlib/eq.h"
/* This must be >= 1 */
#define PERTURB_SHIFT 5
/* This should be >= PySet_MINSIZE - 1 */
#define LINEAR_PROBES 9
/* Object used as dummy key to fill deleted entries */
static PyObject _dummy_struct;
#define dummy (&_dummy_struct)
/* Exported for the gdb plugin's benefit. */
PyObject *_PySet_Dummy = dummy;
#define INIT_NONZERO_SET_SLOTS(so) do { \
(so)->table = (so)->smalltable; \
(so)->mask = PySet_MINSIZE - 1; \
(so)->hash = -1; \
} while(0)
#define EMPTY_TO_MINSIZE(so) do { \
memset((so)->smalltable, 0, sizeof((so)->smalltable)); \
(so)->used = (so)->fill = 0; \
INIT_NONZERO_SET_SLOTS(so); \
} while(0)
/* Reuse scheme to save calls to malloc, free, and memset */
#ifndef PySet_MAXFREELIST
#define PySet_MAXFREELIST 80
#endif
static PySetObject *free_list[PySet_MAXFREELIST];
static int numfree = 0;
/*
The basic lookup function used by all operations. The basic lookup function used by all operations.
This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4. This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4.
@ -66,6 +27,28 @@ Unlike the dictionary implementation, the lookkey functions can return
NULL if the rich comparison returns an error. NULL if the rich comparison returns an error.
*/ */
#include "Python.h"
#include "structmember.h"
#include "stringlib/eq.h"
/* Object used as dummy key to fill deleted entries */
static PyObject _dummy_struct;
#define dummy (&_dummy_struct)
/* Exported for the gdb plugin's benefit. */
PyObject *_PySet_Dummy = dummy;
/* ======================================================================== */
/* ======= Begin logic for probing the hash table ========================= */
/* This should be >= PySet_MINSIZE - 1 */
#define LINEAR_PROBES 9
/* This must be >= 1 */
#define PERTURB_SHIFT 5
static setentry * static setentry *
set_lookkey(PySetObject *so, PyObject *key, Py_hash_t hash) set_lookkey(PySetObject *so, PyObject *key, Py_hash_t hash)
{ {
@ -199,38 +182,6 @@ set_lookkey_unicode(PySetObject *so, PyObject *key, Py_hash_t hash)
return freeslot == NULL ? entry : freeslot; return freeslot == NULL ? entry : freeslot;
} }
/*
Internal routine to insert a new key into the table.
Used by the public insert routine.
Eats a reference to key.
*/
static int
set_insert_key(PySetObject *so, PyObject *key, Py_hash_t hash)
{
setentry *entry;
assert(so->lookup != NULL);
entry = so->lookup(so, key, hash);
if (entry == NULL)
return -1;
if (entry->key == NULL) {
/* UNUSED */
so->fill++;
entry->key = key;
entry->hash = hash;
so->used++;
} else if (entry->key == dummy) {
/* DUMMY */
entry->key = key;
entry->hash = hash;
so->used++;
} else {
/* ACTIVE */
Py_DECREF(key);
}
return 0;
}
/* /*
Internal routine used by set_table_resize() to insert an item which is Internal routine used by set_table_resize() to insert an item which is
known to be absent from the set. This routine also assumes that known to be absent from the set. This routine also assumes that
@ -268,6 +219,42 @@ set_insert_clean(PySetObject *so, PyObject *key, Py_hash_t hash)
so->used++; so->used++;
} }
/* ======== End logic for probing the hash table ========================== */
/* ======================================================================== */
/*
Internal routine to insert a new key into the table.
Used by the public insert routine.
Eats a reference to key.
*/
static int
set_insert_key(PySetObject *so, PyObject *key, Py_hash_t hash)
{
setentry *entry;
assert(so->lookup != NULL);
entry = so->lookup(so, key, hash);
if (entry == NULL)
return -1;
if (entry->key == NULL) {
/* UNUSED */
so->fill++;
entry->key = key;
entry->hash = hash;
so->used++;
} else if (entry->key == dummy) {
/* DUMMY */
entry->key = key;
entry->hash = hash;
so->used++;
} else {
/* ACTIVE */
Py_DECREF(key);
}
return 0;
}
/* /*
Restructure the table by allocating a new table and reinserting all Restructure the table by allocating a new table and reinserting all
keys again. When entries have been deleted, the new table may keys again. When entries have been deleted, the new table may
@ -441,6 +428,17 @@ set_discard_key(PySetObject *so, PyObject *key)
return DISCARD_FOUND; return DISCARD_FOUND;
} }
static void
set_empty_to_minsize(PySetObject *so)
{
memset(so->smalltable, 0, sizeof(so->smalltable));
so->fill = 0;
so->used = 0;
so->mask = PySet_MINSIZE - 1;
so->table = so->smalltable;
so->hash = -1;
}
static int static int
set_clear_internal(PySetObject *so) set_clear_internal(PySetObject *so)
{ {
@ -448,14 +446,13 @@ set_clear_internal(PySetObject *so)
int table_is_malloced; int table_is_malloced;
Py_ssize_t fill; Py_ssize_t fill;
setentry small_copy[PySet_MINSIZE]; setentry small_copy[PySet_MINSIZE];
#ifdef Py_DEBUG
-    Py_ssize_t i, n;
-    assert (PyAnySet_Check(so));
-    n = so->mask + 1;
-    i = 0;
+    Py_ssize_t i = 0;
+    Py_ssize_t n = so->mask + 1;
#endif
+    assert (PyAnySet_Check(so));
table = so->table; table = so->table;
assert(table != NULL); assert(table != NULL);
table_is_malloced = table != so->smalltable; table_is_malloced = table != so->smalltable;
@ -468,7 +465,7 @@ set_clear_internal(PySetObject *so)
*/ */
fill = so->fill; fill = so->fill;
if (table_is_malloced) if (table_is_malloced)
EMPTY_TO_MINSIZE(so); set_empty_to_minsize(so);
else if (fill > 0) { else if (fill > 0) {
/* It's a small table with something that needs to be cleared. /* It's a small table with something that needs to be cleared.
@ -477,7 +474,7 @@ set_clear_internal(PySetObject *so)
*/ */
memcpy(small_copy, table, sizeof(small_copy)); memcpy(small_copy, table, sizeof(small_copy));
table = small_copy; table = small_copy;
EMPTY_TO_MINSIZE(so); set_empty_to_minsize(so);
} }
/* else it's a small table that's already empty */ /* else it's a small table that's already empty */
@ -560,9 +557,6 @@ set_dealloc(PySetObject *so)
} }
if (so->table != so->smalltable) if (so->table != so->smalltable)
PyMem_DEL(so->table); PyMem_DEL(so->table);
if (numfree < PySet_MAXFREELIST && PyAnySet_CheckExact(so))
free_list[numfree++] = so;
else
Py_TYPE(so)->tp_free(so); Py_TYPE(so)->tp_free(so);
Py_TRASHCAN_SAFE_END(so) Py_TRASHCAN_SAFE_END(so)
} }
@ -1018,24 +1012,16 @@ make_new_set(PyTypeObject *type, PyObject *iterable)
PySetObject *so = NULL; PySetObject *so = NULL;
/* create PySetObject structure */ /* create PySetObject structure */
if (numfree &&
(type == &PySet_Type || type == &PyFrozenSet_Type)) {
so = free_list[--numfree];
assert (so != NULL && PyAnySet_CheckExact(so));
Py_TYPE(so) = type;
_Py_NewReference((PyObject *)so);
EMPTY_TO_MINSIZE(so);
PyObject_GC_Track(so);
} else {
so = (PySetObject *)type->tp_alloc(type, 0); so = (PySetObject *)type->tp_alloc(type, 0);
if (so == NULL) if (so == NULL)
return NULL; return NULL;
/* tp_alloc has already zeroed the structure */
assert(so->table == NULL && so->fill == 0 && so->used == 0);
INIT_NONZERO_SET_SLOTS(so);
}
so->fill = 0;
so->used = 0;
so->mask = PySet_MINSIZE - 1;
so->table = so->smalltable;
so->lookup = set_lookkey_unicode; so->lookup = set_lookkey_unicode;
so->hash = -1;
so->weakreflist = NULL; so->weakreflist = NULL;
if (iterable != NULL) { if (iterable != NULL) {
@ -1098,34 +1084,15 @@ frozenset_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
int int
PySet_ClearFreeList(void) PySet_ClearFreeList(void)
{ {
int freelist_size = numfree; return 0;
PySetObject *so;
while (numfree) {
numfree--;
so = free_list[numfree];
PyObject_GC_Del(so);
}
return freelist_size;
} }
void void
PySet_Fini(void) PySet_Fini(void)
{ {
PySet_ClearFreeList();
Py_CLEAR(emptyfrozenset); Py_CLEAR(emptyfrozenset);
} }
/* Print summary info about the state of the optimized allocator */
void
_PySet_DebugMallocStats(FILE *out)
{
_PyDebugAllocatorStats(out,
"free PySetObject",
numfree, sizeof(PySetObject));
}
static PyObject * static PyObject *
set_new(PyTypeObject *type, PyObject *args, PyObject *kwds) set_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{ {
@ -2398,7 +2365,7 @@ test_c_api(PySetObject *so)
Py_ssize_t count; Py_ssize_t count;
char *s; char *s;
Py_ssize_t i; Py_ssize_t i;
PyObject *elem=NULL, *dup=NULL, *t, *f, *dup2, *x; PyObject *elem=NULL, *dup=NULL, *t, *f, *dup2, *x=NULL;
PyObject *ob = (PyObject *)so; PyObject *ob = (PyObject *)so;
Py_hash_t hash; Py_hash_t hash;
PyObject *str; PyObject *str;

View File

@ -208,6 +208,8 @@ new_threadstate(PyInterpreterState *interp, int init)
tstate->trash_delete_nesting = 0; tstate->trash_delete_nesting = 0;
tstate->trash_delete_later = NULL; tstate->trash_delete_later = NULL;
tstate->on_delete = NULL;
tstate->on_delete_data = NULL;
if (init) if (init)
_PyThreadState_Init(tstate); _PyThreadState_Init(tstate);
@ -390,6 +392,9 @@ tstate_delete_common(PyThreadState *tstate)
if (tstate->next) if (tstate->next)
tstate->next->prev = tstate->prev; tstate->next->prev = tstate->prev;
HEAD_UNLOCK(); HEAD_UNLOCK();
if (tstate->on_delete != NULL) {
tstate->on_delete(tstate->on_delete_data);
}
PyMem_RawFree(tstate); PyMem_RawFree(tstate);
} }