2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Unit tests for the multiprocessing package
|
|
|
|
#
|
|
|
|
|
|
|
|
import unittest
|
2018-11-04 18:40:32 -04:00
|
|
|
import unittest.mock
|
2008-06-11 13:44:04 -03:00
|
|
|
import queue as pyqueue
|
2022-06-09 13:55:12 -03:00
|
|
|
import textwrap
|
2008-06-11 13:44:04 -03:00
|
|
|
import time
|
2009-07-17 09:07:01 -03:00
|
|
|
import io
|
2011-12-21 06:03:24 -04:00
|
|
|
import itertools
|
2008-06-11 13:44:04 -03:00
|
|
|
import sys
|
|
|
|
import os
|
|
|
|
import gc
|
2011-04-05 13:11:33 -03:00
|
|
|
import errno
|
2023-07-06 19:46:50 -03:00
|
|
|
import functools
|
2008-06-11 13:44:04 -03:00
|
|
|
import signal
|
|
|
|
import array
|
|
|
|
import socket
|
|
|
|
import random
|
|
|
|
import logging
|
2019-05-13 16:15:32 -03:00
|
|
|
import subprocess
|
2012-06-15 14:26:07 -03:00
|
|
|
import struct
|
2012-10-08 10:56:24 -03:00
|
|
|
import operator
|
2022-04-22 19:47:09 -03:00
|
|
|
import pathlib
|
2019-02-24 00:08:16 -04:00
|
|
|
import pickle
|
2017-03-24 09:52:11 -03:00
|
|
|
import weakref
|
2018-09-04 05:53:54 -03:00
|
|
|
import warnings
|
Merged revisions 70734,70775,70856,70874,70876-70877 via svnmerge
........
r70734 | r.david.murray | 2009-03-30 15:04:00 -0400 (Mon, 30 Mar 2009) | 7 lines
Add import_function method to test.test_support, and modify a number of
tests that expect to be skipped if imports fail or functions don't
exist to use import_function and import_module. The ultimate goal is
to change regrtest to not skip automatically on ImportError. Checking
in now to make sure the buldbots don't show any errors on platforms
I can't direct test on.
........
r70775 | r.david.murray | 2009-03-30 19:05:48 -0400 (Mon, 30 Mar 2009) | 4 lines
Change more tests to use import_module for the modules that
should cause tests to be skipped. Also rename import_function
to the more descriptive get_attribute and add a docstring.
........
r70856 | r.david.murray | 2009-03-31 14:32:17 -0400 (Tue, 31 Mar 2009) | 7 lines
A few more test skips via import_module, and change import_module to
return the error message produced by importlib, so that if an import
in the package whose import is being wrapped is what failed the skip
message will contain the name of that module instead of the name of the
wrapped module. Also fixed formatting of some previous comments.
........
r70874 | r.david.murray | 2009-03-31 15:33:15 -0400 (Tue, 31 Mar 2009) | 5 lines
Improve test_support.import_module docstring, remove
deprecated flag from get_attribute since it isn't likely
to do anything useful.
........
r70876 | r.david.murray | 2009-03-31 15:49:15 -0400 (Tue, 31 Mar 2009) | 4 lines
Remove the regrtest check that turns any ImportError into a skipped test.
Hopefully all modules whose imports legitimately result in a skipped
test have been properly wrapped by the previous commits.
........
r70877 | r.david.murray | 2009-03-31 15:57:24 -0400 (Tue, 31 Mar 2009) | 2 lines
Add NEWS entry for regrtest change.
........
2009-03-31 20:16:50 -03:00
|
|
|
import test.support
|
2015-05-06 01:01:52 -03:00
|
|
|
import test.support.script_helper
|
2017-09-14 18:40:56 -03:00
|
|
|
from test import support
|
2020-06-04 09:48:17 -03:00
|
|
|
from test.support import hashlib_helper
|
2020-06-30 10:46:31 -03:00
|
|
|
from test.support import import_helper
|
2020-08-07 12:18:38 -03:00
|
|
|
from test.support import os_helper
|
2023-07-06 19:46:50 -03:00
|
|
|
from test.support import script_helper
|
2020-04-25 04:06:29 -03:00
|
|
|
from test.support import socket_helper
|
2020-05-27 19:10:27 -03:00
|
|
|
from test.support import threading_helper
|
2020-08-07 12:18:38 -03:00
|
|
|
from test.support import warnings_helper
|
2008-06-11 13:44:04 -03:00
|
|
|
|
Merged revisions 66670,66681,66688,66696-66699 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r66670 | georg.brandl | 2008-09-28 15:01:36 -0500 (Sun, 28 Sep 2008) | 2 lines
Don't show version in title.
........
r66681 | georg.brandl | 2008-09-29 11:51:35 -0500 (Mon, 29 Sep 2008) | 2 lines
Update nasm location.
........
r66688 | jesse.noller | 2008-09-29 19:15:45 -0500 (Mon, 29 Sep 2008) | 2 lines
issue3770: if SEM_OPEN is 0, disable the mp.synchronize module, rev. Nick Coghlan, Damien Miller
........
r66696 | andrew.kuchling | 2008-09-30 07:31:07 -0500 (Tue, 30 Sep 2008) | 1 line
Edits, and add markup
........
r66697 | andrew.kuchling | 2008-09-30 08:00:34 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fix
........
r66698 | andrew.kuchling | 2008-09-30 08:00:51 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fixes
........
r66699 | andrew.kuchling | 2008-09-30 08:01:46 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fixes. (optparse.rst probably needs an entire revision pass.)
........
2008-10-04 19:00:42 -03:00
|
|
|
|
Merged revisions 70734,70775,70856,70874,70876-70877 via svnmerge
........
r70734 | r.david.murray | 2009-03-30 15:04:00 -0400 (Mon, 30 Mar 2009) | 7 lines
Add import_function method to test.test_support, and modify a number of
tests that expect to be skipped if imports fail or functions don't
exist to use import_function and import_module. The ultimate goal is
to change regrtest to not skip automatically on ImportError. Checking
in now to make sure the buldbots don't show any errors on platforms
I can't direct test on.
........
r70775 | r.david.murray | 2009-03-30 19:05:48 -0400 (Mon, 30 Mar 2009) | 4 lines
Change more tests to use import_module for the modules that
should cause tests to be skipped. Also rename import_function
to the more descriptive get_attribute and add a docstring.
........
r70856 | r.david.murray | 2009-03-31 14:32:17 -0400 (Tue, 31 Mar 2009) | 7 lines
A few more test skips via import_module, and change import_module to
return the error message produced by importlib, so that if an import
in the package whose import is being wrapped is what failed the skip
message will contain the name of that module instead of the name of the
wrapped module. Also fixed formatting of some previous comments.
........
r70874 | r.david.murray | 2009-03-31 15:33:15 -0400 (Tue, 31 Mar 2009) | 5 lines
Improve test_support.import_module docstring, remove
deprecated flag from get_attribute since it isn't likely
to do anything useful.
........
r70876 | r.david.murray | 2009-03-31 15:49:15 -0400 (Tue, 31 Mar 2009) | 4 lines
Remove the regrtest check that turns any ImportError into a skipped test.
Hopefully all modules whose imports legitimately result in a skipped
test have been properly wrapped by the previous commits.
........
r70877 | r.david.murray | 2009-03-31 15:57:24 -0400 (Tue, 31 Mar 2009) | 2 lines
Add NEWS entry for regrtest change.
........
2009-03-31 20:16:50 -03:00
|
|
|
# Skip tests if _multiprocessing wasn't built.
|
2020-06-30 10:46:31 -03:00
|
|
|
_multiprocessing = import_helper.import_module('_multiprocessing')
|
Merged revisions 70734,70775,70856,70874,70876-70877 via svnmerge
........
r70734 | r.david.murray | 2009-03-30 15:04:00 -0400 (Mon, 30 Mar 2009) | 7 lines
Add import_function method to test.test_support, and modify a number of
tests that expect to be skipped if imports fail or functions don't
exist to use import_function and import_module. The ultimate goal is
to change regrtest to not skip automatically on ImportError. Checking
in now to make sure the buldbots don't show any errors on platforms
I can't direct test on.
........
r70775 | r.david.murray | 2009-03-30 19:05:48 -0400 (Mon, 30 Mar 2009) | 4 lines
Change more tests to use import_module for the modules that
should cause tests to be skipped. Also rename import_function
to the more descriptive get_attribute and add a docstring.
........
r70856 | r.david.murray | 2009-03-31 14:32:17 -0400 (Tue, 31 Mar 2009) | 7 lines
A few more test skips via import_module, and change import_module to
return the error message produced by importlib, so that if an import
in the package whose import is being wrapped is what failed the skip
message will contain the name of that module instead of the name of the
wrapped module. Also fixed formatting of some previous comments.
........
r70874 | r.david.murray | 2009-03-31 15:33:15 -0400 (Tue, 31 Mar 2009) | 5 lines
Improve test_support.import_module docstring, remove
deprecated flag from get_attribute since it isn't likely
to do anything useful.
........
r70876 | r.david.murray | 2009-03-31 15:49:15 -0400 (Tue, 31 Mar 2009) | 4 lines
Remove the regrtest check that turns any ImportError into a skipped test.
Hopefully all modules whose imports legitimately result in a skipped
test have been properly wrapped by the previous commits.
........
r70877 | r.david.murray | 2009-03-31 15:57:24 -0400 (Tue, 31 Mar 2009) | 2 lines
Add NEWS entry for regrtest change.
........
2009-03-31 20:16:50 -03:00
|
|
|
# Skip tests if sem_open implementation is broken.
|
2020-06-18 09:53:19 -03:00
|
|
|
support.skip_if_broken_multiprocessing_synchronize()
|
Merged revisions 80552-80556,80564-80566,80568-80571 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r80552 | victor.stinner | 2010-04-27 23:46:03 +0200 (mar., 27 avril 2010) | 3 lines
Issue #7449, part 1: fix test_support.py for Python compiled without thread
........
r80553 | victor.stinner | 2010-04-27 23:47:01 +0200 (mar., 27 avril 2010) | 1 line
Issue #7449, part 2: regrtest.py -j option requires thread support
........
r80554 | victor.stinner | 2010-04-27 23:51:26 +0200 (mar., 27 avril 2010) | 9 lines
Issue #7449 part 3, test_doctest: import trace module in test_coverage()
Import trace module fail if the threading module is missing. test_coverage() is
only used if test_doctest.py is used with the -c option. This commit allows to
execute the test suite without thread support.
Move "import trace" in test_coverage() and use
test_support.import_module('trace').
........
r80555 | victor.stinner | 2010-04-27 23:56:26 +0200 (mar., 27 avril 2010) | 6 lines
Issue #7449, part 4: skip test_multiprocessing if thread support is disabled
import threading after _multiprocessing to raise a more revelant error message:
"No module named _multiprocessing". _multiprocessing is not compiled without
thread support.
........
r80556 | victor.stinner | 2010-04-28 00:01:24 +0200 (mer., 28 avril 2010) | 8 lines
Issue #7449, part 5: split Test.test_open() of ctypes/test/test_errno.py
* Split Test.test_open() in 2 functions: test_open() and test_thread_open()
* Skip test_open() and test_thread_open() if we are unable to find the C
library
* Skip test_thread_open() if thread support is disabled
* Use unittest.skipUnless(os.name == "nt", ...) on test_GetLastError()
........
r80564 | victor.stinner | 2010-04-28 00:59:35 +0200 (mer., 28 avril 2010) | 4 lines
Issue #7449, part 6: fix test_hashlib for missing threading module
Move @test_support.reap_thread decorator from test_main() to test_threaded_hashing().
........
r80565 | victor.stinner | 2010-04-28 01:01:29 +0200 (mer., 28 avril 2010) | 6 lines
Issue #7449, part 7: simplify threading detection in test_capi
* Skip TestPendingCalls if threading module is missing
* Test if threading module is present or not, instead of test the presence of
_testcapi._test_thread_state
........
r80566 | victor.stinner | 2010-04-28 01:03:16 +0200 (mer., 28 avril 2010) | 4 lines
Issue #7449, part 8: don't skip the whole test_asynchat if threading is missing
TestFifo can be executed without the threading module
........
r80568 | victor.stinner | 2010-04-28 01:14:58 +0200 (mer., 28 avril 2010) | 6 lines
Issue #7449, part 9: fix test_xmlrpclib for missing threading module
* Skip testcases using threads if threading module is missing
* Use "http://" instead of URL in ServerProxyTestCase if threading is missing
because URL is not set in this case
........
r80569 | victor.stinner | 2010-04-28 01:33:58 +0200 (mer., 28 avril 2010) | 6 lines
Partial revert of r80556 (Issue #7449, part 5, fix ctypes test)
Rewrite r80556: the thread test have to be executed just after the test on
libc_open() and so the test cannot be splitted in two functions (without
duplicating code, and I don't want to duplicate code).
........
r80570 | victor.stinner | 2010-04-28 01:51:16 +0200 (mer., 28 avril 2010) | 8 lines
Issue #7449, part 10: test_cmd imports trace module using test_support.import_module()
Use test_support.import_module() instead of import to raise a SkipTest
exception if the import fail. Import trace fails if the threading module is
missing.
See also part 3: test_doctest: import trace module in test_coverage().
........
r80571 | victor.stinner | 2010-04-28 01:55:59 +0200 (mer., 28 avril 2010) | 6 lines
Issue #7449, last part (11): fix many tests if thread support is disabled
* Use try/except ImportError or test_support.import_module() to import thread
and threading modules
* Add @unittest.skipUnless(threading, ...) to testcases using threads
........
2010-04-28 19:31:17 -03:00
|
|
|
import threading
|
Merged revisions 66670,66681,66688,66696-66699 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r66670 | georg.brandl | 2008-09-28 15:01:36 -0500 (Sun, 28 Sep 2008) | 2 lines
Don't show version in title.
........
r66681 | georg.brandl | 2008-09-29 11:51:35 -0500 (Mon, 29 Sep 2008) | 2 lines
Update nasm location.
........
r66688 | jesse.noller | 2008-09-29 19:15:45 -0500 (Mon, 29 Sep 2008) | 2 lines
issue3770: if SEM_OPEN is 0, disable the mp.synchronize module, rev. Nick Coghlan, Damien Miller
........
r66696 | andrew.kuchling | 2008-09-30 07:31:07 -0500 (Tue, 30 Sep 2008) | 1 line
Edits, and add markup
........
r66697 | andrew.kuchling | 2008-09-30 08:00:34 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fix
........
r66698 | andrew.kuchling | 2008-09-30 08:00:51 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fixes
........
r66699 | andrew.kuchling | 2008-09-30 08:01:46 -0500 (Tue, 30 Sep 2008) | 1 line
Markup fixes. (optparse.rst probably needs an entire revision pass.)
........
2008-10-04 19:00:42 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
import multiprocessing.connection
|
2017-07-24 19:33:56 -03:00
|
|
|
import multiprocessing.dummy
|
2008-06-11 13:44:04 -03:00
|
|
|
import multiprocessing.heap
|
2017-07-24 19:33:56 -03:00
|
|
|
import multiprocessing.managers
|
2008-06-11 13:44:04 -03:00
|
|
|
import multiprocessing.pool
|
2017-07-24 19:33:56 -03:00
|
|
|
import multiprocessing.queues
|
2023-06-06 17:50:43 -03:00
|
|
|
from multiprocessing.connection import wait
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2011-09-20 15:36:51 -03:00
|
|
|
from multiprocessing import util
|
|
|
|
|
|
|
|
try:
|
|
|
|
from multiprocessing import reduction
|
2013-08-14 11:35:41 -03:00
|
|
|
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
|
2011-09-20 15:36:51 -03:00
|
|
|
except ImportError:
|
|
|
|
HAS_REDUCTION = False
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-10-06 22:12:19 -03:00
|
|
|
try:
|
|
|
|
from multiprocessing.sharedctypes import Value, copy
|
|
|
|
HAS_SHAREDCTYPES = True
|
|
|
|
except ImportError:
|
|
|
|
HAS_SHAREDCTYPES = False
|
|
|
|
|
2019-02-24 00:08:16 -04:00
|
|
|
try:
|
|
|
|
from multiprocessing import shared_memory
|
|
|
|
HAS_SHMEM = True
|
|
|
|
except ImportError:
|
|
|
|
HAS_SHMEM = False
|
|
|
|
|
2011-08-23 14:46:22 -03:00
|
|
|
try:
|
|
|
|
import msvcrt
|
|
|
|
except ImportError:
|
|
|
|
msvcrt = None
|
|
|
|
|
2017-09-15 10:55:31 -03:00
|
|
|
|
2023-10-10 21:57:53 -03:00
|
|
|
if support.HAVE_ASAN_FORK_BUG:
|
2023-09-25 13:02:04 -03:00
|
|
|
# gh-89363: Skip multiprocessing tests if Python is built with ASAN to
|
2022-03-01 10:44:08 -04:00
|
|
|
# work around a libasan race condition: dead lock in pthread_create().
|
2023-10-10 21:57:53 -03:00
|
|
|
raise unittest.SkipTest("libasan has a pthread_create() dead lock related to thread+fork")
|
2022-03-01 10:44:08 -04:00
|
|
|
|
|
|
|
|
2023-10-10 22:49:09 -03:00
|
|
|
# gh-110666: Tolerate a difference of 100 ms when comparing timings
|
|
|
|
# (clock resolution)
|
|
|
|
CLOCK_RES = 0.100
|
|
|
|
|
|
|
|
|
2008-07-13 15:45:30 -03:00
|
|
|
def latin(s):
    """Return *s* encoded to bytes with the Latin-1 codec."""
    encoded = s.encode('latin')
    return encoded
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2017-07-24 19:33:56 -03:00
|
|
|
|
|
|
|
def close_queue(queue):
    """Close *queue* and wait for its feeder thread to finish.

    Only real ``multiprocessing.queues.Queue`` instances need this cleanup;
    other queue-like stand-ins (threaded/manager variants) are left alone.
    """
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
|
|
|
|
|
|
|
|
|
2017-09-15 10:55:31 -03:00
|
|
|
def join_process(process):
    """Join *process* with a timeout, failing loudly if it never exits.

    multiprocessing.Process exposes the same join()/is_alive() API as
    threading.Thread, so the threading_helper support function can be
    reused for processes as well.
    """
    threading_helper.join_thread(process)
|
2017-09-14 18:40:56 -03:00
|
|
|
|
|
|
|
|
2019-05-10 17:59:08 -03:00
|
|
|
# POSIX-only helper: unlink a leaked named system resource (e.g. a POSIX
# semaphore or shared-memory segment) via the resource tracker's own
# per-type cleanup table.
if os.name == "posix":
    from multiprocessing import resource_tracker

    def _resource_unlink(name, rtype):
        # _CLEANUP_FUNCS maps a resource-type string to its unlink function.
        # NOTE(review): relies on a private resource_tracker attribute.
        resource_tracker._CLEANUP_FUNCS[rtype](name)
|
|
|
|
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Constants
|
|
|
|
#
|
|
|
|
|
|
|
|
LOG_LEVEL = util.SUBWARNING
|
2010-01-26 23:36:01 -04:00
|
|
|
#LOG_LEVEL = logging.DEBUG
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
DELTA = 0.1
|
|
|
|
CHECK_TIMINGS = False # making true makes tests take a lot longer
|
|
|
|
# and can sometimes cause some non-serious
|
|
|
|
# failures because some calls block a bit
|
|
|
|
# longer than expected
|
|
|
|
if CHECK_TIMINGS:
|
|
|
|
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
|
|
|
|
else:
|
|
|
|
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
|
|
|
|
|
2022-04-19 11:27:00 -03:00
|
|
|
# BaseManager.shutdown_timeout
|
|
|
|
SHUTDOWN_TIMEOUT = support.SHORT_TIMEOUT
|
|
|
|
|
2022-06-15 13:49:14 -03:00
|
|
|
WAIT_ACTIVE_CHILDREN_TIMEOUT = 5.0
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
HAVE_GETVALUE = not getattr(_multiprocessing,
|
|
|
|
'HAVE_BROKEN_SEM_GETVALUE', False)
|
|
|
|
|
2018-06-25 21:11:06 -03:00
|
|
|
WIN32 = (sys.platform == "win32")
|
|
|
|
|
2012-05-10 12:11:12 -03:00
|
|
|
def wait_for_handle(handle, timeout):
    """Wait until *handle* is ready; a negative timeout means block forever."""
    # connection.wait() treats None as "no timeout"; normalize negatives.
    effective = None if (timeout is not None and timeout < 0.0) else timeout
    return wait([handle], effective)
|
2009-01-19 12:23:53 -04:00
|
|
|
|
2011-08-23 14:46:22 -03:00
|
|
|
# Highest file-descriptor number the platform allows this process, used by
# tests that close or inherit descriptors.  The previous bare ``except:``
# also swallowed KeyboardInterrupt/SystemExit; catch only the failures
# os.sysconf can actually produce.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    # os.sysconf may be missing (non-POSIX), reject the name, or fail at
    # the OS level; fall back to the traditional conservative default.
    MAXFD = 256
|
|
|
|
|
2013-08-14 11:35:41 -03:00
|
|
|
# To speed up tests when using the forkserver, we can preload these:
|
|
|
|
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
|
|
|
|
|
Merged revisions 79297,79310,79382,79425-79427,79450 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r79297 | florent.xicluna | 2010-03-22 18:18:18 +0100 (lun, 22 mar 2010) | 2 lines
#7668: Fix test_httpservers failure when sys.executable contains non-ASCII bytes.
........
r79310 | florent.xicluna | 2010-03-22 23:52:11 +0100 (lun, 22 mar 2010) | 2 lines
Issue #8205: Remove the "Modules" directory from sys.path when Python is running from the build directory (POSIX only).
........
r79382 | florent.xicluna | 2010-03-24 20:33:25 +0100 (mer, 24 mar 2010) | 2 lines
Skip tests which depend on multiprocessing.sharedctypes, if _ctypes is not available.
........
r79425 | florent.xicluna | 2010-03-25 21:32:07 +0100 (jeu, 25 mar 2010) | 2 lines
Syntax cleanup `== None` -> `is None`
........
r79426 | florent.xicluna | 2010-03-25 21:33:49 +0100 (jeu, 25 mar 2010) | 2 lines
#8207: Fix test_pep277 on OS X
........
r79427 | florent.xicluna | 2010-03-25 21:39:10 +0100 (jeu, 25 mar 2010) | 2 lines
Fix test_unittest and test_warnings when running "python -Werror -m test.regrtest"
........
r79450 | florent.xicluna | 2010-03-26 20:32:44 +0100 (ven, 26 mar 2010) | 2 lines
Ensure that the failed or unexpected tests are sorted before printing.
........
2010-03-27 21:25:02 -03:00
|
|
|
#
|
|
|
|
# Some tests require ctypes
|
|
|
|
#
|
|
|
|
|
|
|
|
try:
|
2017-07-21 07:35:33 -03:00
|
|
|
from ctypes import Structure, c_int, c_double, c_longlong
|
Merged revisions 79297,79310,79382,79425-79427,79450 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r79297 | florent.xicluna | 2010-03-22 18:18:18 +0100 (lun, 22 mar 2010) | 2 lines
#7668: Fix test_httpservers failure when sys.executable contains non-ASCII bytes.
........
r79310 | florent.xicluna | 2010-03-22 23:52:11 +0100 (lun, 22 mar 2010) | 2 lines
Issue #8205: Remove the "Modules" directory from sys.path when Python is running from the build directory (POSIX only).
........
r79382 | florent.xicluna | 2010-03-24 20:33:25 +0100 (mer, 24 mar 2010) | 2 lines
Skip tests which depend on multiprocessing.sharedctypes, if _ctypes is not available.
........
r79425 | florent.xicluna | 2010-03-25 21:32:07 +0100 (jeu, 25 mar 2010) | 2 lines
Syntax cleanup `== None` -> `is None`
........
r79426 | florent.xicluna | 2010-03-25 21:33:49 +0100 (jeu, 25 mar 2010) | 2 lines
#8207: Fix test_pep277 on OS X
........
r79427 | florent.xicluna | 2010-03-25 21:39:10 +0100 (jeu, 25 mar 2010) | 2 lines
Fix test_unittest and test_warnings when running "python -Werror -m test.regrtest"
........
r79450 | florent.xicluna | 2010-03-26 20:32:44 +0100 (ven, 26 mar 2010) | 2 lines
Ensure that the failed or unexpected tests are sorted before printing.
........
2010-03-27 21:25:02 -03:00
|
|
|
except ImportError:
|
|
|
|
Structure = object
|
2017-07-21 08:24:05 -03:00
|
|
|
c_int = c_double = c_longlong = None
|
Merged revisions 79297,79310,79382,79425-79427,79450 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r79297 | florent.xicluna | 2010-03-22 18:18:18 +0100 (lun, 22 mar 2010) | 2 lines
#7668: Fix test_httpservers failure when sys.executable contains non-ASCII bytes.
........
r79310 | florent.xicluna | 2010-03-22 23:52:11 +0100 (lun, 22 mar 2010) | 2 lines
Issue #8205: Remove the "Modules" directory from sys.path when Python is running from the build directory (POSIX only).
........
r79382 | florent.xicluna | 2010-03-24 20:33:25 +0100 (mer, 24 mar 2010) | 2 lines
Skip tests which depend on multiprocessing.sharedctypes, if _ctypes is not available.
........
r79425 | florent.xicluna | 2010-03-25 21:32:07 +0100 (jeu, 25 mar 2010) | 2 lines
Syntax cleanup `== None` -> `is None`
........
r79426 | florent.xicluna | 2010-03-25 21:33:49 +0100 (jeu, 25 mar 2010) | 2 lines
#8207: Fix test_pep277 on OS X
........
r79427 | florent.xicluna | 2010-03-25 21:39:10 +0100 (jeu, 25 mar 2010) | 2 lines
Fix test_unittest and test_warnings when running "python -Werror -m test.regrtest"
........
r79450 | florent.xicluna | 2010-03-26 20:32:44 +0100 (ven, 26 mar 2010) | 2 lines
Ensure that the failed or unexpected tests are sorted before printing.
........
2010-03-27 21:25:02 -03:00
|
|
|
|
2011-11-22 13:55:22 -04:00
|
|
|
|
|
|
|
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # POSIX guarantees at least this many semaphores per process.
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf is unavailable, or this setting is not defined: assume ok.
        return
    # -1 means "no limit"; anything at or above the minimum is fine.
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
|
|
|
|
|
|
|
|
|
2023-07-06 19:46:50 -03:00
|
|
|
def only_run_in_spawn_testsuite(reason):
    """Return a decorator that raises SkipTest unless the start method is "spawn".

    "spawn" is the one start method available on every platform, so limiting
    a start-method-agnostic test to test_multiprocessing_spawn avoids running
    it redundantly under every start method and saves overall suite time.

    This would be unnecessary if non-start-method-specific tests lived in
    their own files instead of being rerun under every start method.
    """

    def decorator(test_item):

        @functools.wraps(test_item)
        def spawn_check_wrapper(*args, **kwargs):
            start_method = multiprocessing.get_start_method()
            if start_method != "spawn":
                raise unittest.SkipTest(f"{start_method=}, not 'spawn'; {reason}")
            return test_item(*args, **kwargs)

        return spawn_check_wrapper

    return decorator
|
|
|
|
|
|
|
|
|
|
|
|
class TestInternalDecorators(unittest.TestCase):
    """Logic within a test suite that could errantly skip tests? Test it!"""

    @unittest.skipIf(sys.platform == "win32", "test requires that fork exists.")
    def test_only_run_in_spawn_testsuite(self):
        # This check itself only makes sense under the spawn suite, where
        # the decorator is expected to be a no-op.
        if multiprocessing.get_start_method() != "spawn":
            raise unittest.SkipTest("only run in test_multiprocessing_spawn.")

        # Applying the decorator must never raise at definition time.
        try:
            @only_run_in_spawn_testsuite("testing this decorator")
            def return_four_if_spawn():
                return 4
        except Exception as err:
            self.fail(f"expected decorated `def` not to raise; caught {err}")

        # Flip the global start method back and forth to exercise both the
        # pass-through and the skip paths; restore it no matter what.
        orig_start_method = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method("spawn", force=True)
            self.assertEqual(return_four_if_spawn(), 4)
            multiprocessing.set_start_method("fork", force=True)
            with self.assertRaises(unittest.SkipTest) as ctx:
                return_four_if_spawn()
            # The skip message must carry both the reason and the offending
            # start method so failures are diagnosable from the log.
            self.assertIn("testing this decorator", str(ctx.exception))
            self.assertIn("start_method=", str(ctx.exception))
        finally:
            multiprocessing.set_start_method(orig_start_method, force=True)
|
|
|
|
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Creates a wrapper for a function which records the time it takes to finish
|
|
|
|
#
|
|
|
|
|
|
|
|
class TimingWrapper(object):
    """Callable proxy that records how long each call to *func* takes.

    After every invocation, ``elapsed`` holds the wall-clock duration of the
    most recent call, measured with the monotonic clock.
    """

    def __init__(self, func):
        self.func = func
        # Duration (seconds) of the last call; None until first called.
        self.elapsed = None

    def __call__(self, *args, **kwds):
        start = time.monotonic()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the timing even when the wrapped call raises.
            self.elapsed = time.monotonic() - start
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
#
|
|
|
|
# Base class for test cases
|
|
|
|
#
|
|
|
|
|
|
|
|
class BaseTestCase(object):
    """Mixin shared by the concrete test classes (combined with TestCase)."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons only mean anything when CHECK_TIMINGS is on;
        # with the sentinel timeouts the values are not comparable.
        if not CHECK_TIMINGS:
            return
        self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Some proxy implementations legitimately raise NotImplementedError;
        # only compare the result when the call is actually implemented.
        try:
            result = func(*args)
        except NotImplementedError:
            pass
        else:
            return self.assertEqual(value, result)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Return the value of a semaphore
|
|
|
|
#
|
|
|
|
|
|
|
|
def get_value(self):
    """Best-effort read of a semaphore's current value across implementations."""
    # Prefer a real get_value() method (multiprocessing semaphores).
    try:
        return self.get_value()
    except AttributeError:
        pass
    # Fall back to the private counters used by threading's semaphores in
    # various Python versions.
    for attr in ('_Semaphore__value', '_value'):
        try:
            return getattr(self, attr)
        except AttributeError:
            continue
    raise NotImplementedError
|
|
|
|
|
|
|
|
#
|
|
|
|
# Testcases
|
|
|
|
#
|
|
|
|
|
2017-06-28 07:29:08 -03:00
|
|
|
class DummyCallable:
    """Picklable callable used to verify that the callable object itself
    survives the round trip to a child process."""

    def __call__(self, q, c):
        # The receiver must get a genuine DummyCallable instance back.
        assert isinstance(c, DummyCallable)
        q.put(5)
|
|
|
|
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
class _TestProcess(BaseTestCase):
|
|
|
|
|
|
|
|
ALLOWED_TYPES = ('processes', 'threads')
|
|
|
|
|
|
|
|
    def test_current(self):
        """current_process() must accurately describe the running process."""
        if self.TYPE == 'threads':
            # Thread-backed dummy processes have no authkey/exitcode
            # semantics worth checking here.
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        current = self.current_process()
        authkey = current.authkey

        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        # We *are* the process being described, so its ident is our pid
        # and it cannot have an exit code yet.
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2022-04-22 19:47:09 -03:00
|
|
|
    def test_set_executable(self):
        """set_executable() must accept str, bytes and os.PathLike paths."""
        if self.TYPE == 'threads':
            self.skipTest(f'test not appropriate for {self.TYPE}')
        paths = [
            sys.executable,               # str
            sys.executable.encode(),      # bytes
            pathlib.Path(sys.executable)  # os.PathLike
        ]
        for path in paths:
            self.set_executable(path)
            p = self.Process()
            p.start()
            p.join()
            # A clean exit shows the child interpreter was actually
            # launched with every path flavour.
            self.assertEqual(p.exitcode, 0)
|
|
|
|
|
2023-09-02 01:45:34 -03:00
|
|
|
    @support.requires_resource('cpu')
    def test_args_argument(self):
        """The *args* constructor argument accepts both lists and tuples."""
        # bpo-45735: Using list or tuple as *args* in constructor could
        # achieve the same effect.
        args_cases = (1, "str", [1], (1,))
        args_types = (list, tuple)

        # Every payload is tried wrapped in both container types.
        test_cases = itertools.product(args_cases, args_types)

        for args, args_type in test_cases:
            with self.subTest(args=args, args_type=args_type):
                q = self.Queue(1)
                # pass a tuple or list as args
                p = self.Process(target=self._test_args, args=args_type((q, args)))
                p.daemon = True
                p.start()
                child_args = q.get()
                # The child must have received exactly the second
                # positional argument, regardless of the container type.
                self.assertEqual(child_args, args)
                p.join()
                close_queue(q)
|
|
|
|
|
|
|
|
    @classmethod
    def _test_args(cls, q, arg):
        # Child-side helper: echo the received argument back to the parent.
        q.put(arg)
|
|
|
|
|
2011-02-25 18:07:43 -04:00
|
|
|
    def test_daemon_argument(self):
        """The ``daemon`` constructor argument must control the daemon flag."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # By default uses the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        # Explicit daemon=True/False must win over the inherited default.
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)
|
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
    @classmethod
    def _test(cls, q, *args, **kwds):
        # Child-side helper: report the received arguments and the child's
        # own identity back through the queue, in a fixed order that the
        # parent test asserts on.
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            # Threads share the parent's authkey and pid, so these values
            # are only meaningful for real processes.
            q.put(bytes(current.authkey))
            q.put(current.pid)
|
|
|
|
|
2019-05-20 16:37:05 -03:00
|
|
|
    def test_parent_process_attributes(self):
        """parent_process() is None in the main process and, from a child,
        must describe this process."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # The top-level test process has no multiprocessing parent.
        self.assertIsNone(self.parent_process())

        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(target=self._test_send_parent_process, args=(wconn,))
        p.start()
        p.join()
        parent_pid, parent_name = rconn.recv()
        # From the child's point of view, *we* are the parent.
        self.assertEqual(parent_pid, self.current_process().pid)
        self.assertEqual(parent_pid, os.getpid())
        self.assertEqual(parent_name, self.current_process().name)
|
|
|
|
|
|
|
|
    @classmethod
    def _test_send_parent_process(cls, wconn):
        # Child-side helper: report the parent's pid and name as observed
        # from inside the child.
        from multiprocessing.process import parent_process
        wconn.send([parent_process().pid, parent_process().name])
|
|
|
|
|
|
|
|
def test_parent_process(self):
|
|
|
|
if self.TYPE == "threads":
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
|
|
|
|
# Launch a child process. Make it launch a grandchild process. Kill the
|
|
|
|
# child process and make sure that the grandchild notices the death of
|
|
|
|
# its parent (a.k.a the child process).
|
|
|
|
rconn, wconn = self.Pipe(duplex=False)
|
|
|
|
p = self.Process(
|
|
|
|
target=self._test_create_grandchild_process, args=(wconn, ))
|
|
|
|
p.start()
|
|
|
|
|
2019-12-10 16:12:26 -04:00
|
|
|
if not rconn.poll(timeout=support.LONG_TIMEOUT):
|
2019-05-20 16:37:05 -03:00
|
|
|
raise AssertionError("Could not communicate with child process")
|
|
|
|
parent_process_status = rconn.recv()
|
|
|
|
self.assertEqual(parent_process_status, "alive")
|
|
|
|
|
|
|
|
p.terminate()
|
|
|
|
p.join()
|
|
|
|
|
2019-12-10 16:12:26 -04:00
|
|
|
if not rconn.poll(timeout=support.LONG_TIMEOUT):
|
2019-05-20 16:37:05 -03:00
|
|
|
raise AssertionError("Could not communicate with child process")
|
|
|
|
parent_process_status = rconn.recv()
|
|
|
|
self.assertEqual(parent_process_status, "not alive")
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_create_grandchild_process(cls, wconn):
|
|
|
|
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
|
|
|
|
p.start()
|
2019-06-25 17:44:11 -03:00
|
|
|
time.sleep(300)
|
2019-05-20 16:37:05 -03:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_report_parent_status(cls, wconn):
|
|
|
|
from multiprocessing.process import parent_process
|
|
|
|
wconn.send("alive" if parent_process().is_alive() else "not alive")
|
2019-12-11 06:30:03 -04:00
|
|
|
parent_process().join(timeout=support.SHORT_TIMEOUT)
|
2019-05-20 16:37:05 -03:00
|
|
|
wconn.send("alive" if parent_process().is_alive() else "not alive")
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
def test_process(self):
|
|
|
|
q = self.Queue(1)
|
|
|
|
e = self.Event()
|
|
|
|
args = (q, 1, 2)
|
|
|
|
kwargs = {'hello':23, 'bye':2.54}
|
|
|
|
name = 'SomeProcess'
|
|
|
|
p = self.Process(
|
|
|
|
target=self._test, args=args, kwargs=kwargs, name=name
|
|
|
|
)
|
2008-08-19 16:17:39 -03:00
|
|
|
p.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
current = self.current_process()
|
|
|
|
|
|
|
|
if self.TYPE != 'threads':
|
2010-11-20 15:04:17 -04:00
|
|
|
self.assertEqual(p.authkey, current.authkey)
|
|
|
|
self.assertEqual(p.is_alive(), False)
|
|
|
|
self.assertEqual(p.daemon, True)
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertNotIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
self.assertTrue(type(self.active_children()) is list)
|
2008-08-19 16:17:39 -03:00
|
|
|
self.assertEqual(p.exitcode, None)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
p.start()
|
|
|
|
|
2010-11-20 15:04:17 -04:00
|
|
|
self.assertEqual(p.exitcode, None)
|
|
|
|
self.assertEqual(p.is_alive(), True)
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-11-20 15:04:17 -04:00
|
|
|
self.assertEqual(q.get(), args[1:])
|
|
|
|
self.assertEqual(q.get(), kwargs)
|
|
|
|
self.assertEqual(q.get(), p.name)
|
2008-06-11 13:44:04 -03:00
|
|
|
if self.TYPE != 'threads':
|
2010-11-20 15:04:17 -04:00
|
|
|
self.assertEqual(q.get(), current.authkey)
|
|
|
|
self.assertEqual(q.get(), p.pid)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
p.join()
|
|
|
|
|
2010-11-20 15:04:17 -04:00
|
|
|
self.assertEqual(p.exitcode, 0)
|
|
|
|
self.assertEqual(p.is_alive(), False)
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertNotIn(p, self.active_children())
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(q)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2019-11-19 15:50:12 -04:00
|
|
|
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
|
|
|
|
def test_process_mainthread_native_id(self):
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
|
|
|
|
current_mainthread_native_id = threading.main_thread().native_id
|
|
|
|
|
|
|
|
q = self.Queue(1)
|
|
|
|
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
|
|
|
|
p.start()
|
|
|
|
|
|
|
|
child_mainthread_native_id = q.get()
|
|
|
|
p.join()
|
|
|
|
close_queue(q)
|
|
|
|
|
|
|
|
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_process_mainthread_native_id(cls, q):
|
|
|
|
mainthread_native_id = threading.main_thread().native_id
|
|
|
|
q.put(mainthread_native_id)
|
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
2017-07-18 12:34:23 -03:00
|
|
|
def _sleep_some(cls):
|
2013-10-12 20:49:27 -03:00
|
|
|
time.sleep(100)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2017-06-12 10:28:19 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_sleep(cls, delay):
|
|
|
|
time.sleep(delay)
|
|
|
|
|
2017-07-18 12:34:23 -03:00
|
|
|
def _kill_process(self, meth):
|
2008-06-11 13:44:04 -03:00
|
|
|
if self.TYPE == 'threads':
|
2013-12-08 02:20:35 -04:00
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2017-07-18 12:34:23 -03:00
|
|
|
p = self.Process(target=self._sleep_some)
|
2008-08-19 16:17:39 -03:00
|
|
|
p.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
p.start()
|
|
|
|
|
|
|
|
self.assertEqual(p.is_alive(), True)
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertIn(p, self.active_children())
|
2008-08-19 16:17:39 -03:00
|
|
|
self.assertEqual(p.exitcode, None)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2012-05-10 12:11:12 -03:00
|
|
|
join = TimingWrapper(p.join)
|
|
|
|
|
|
|
|
self.assertEqual(join(0), None)
|
|
|
|
self.assertTimingAlmostEqual(join.elapsed, 0.0)
|
|
|
|
self.assertEqual(p.is_alive(), True)
|
|
|
|
|
|
|
|
self.assertEqual(join(-1), None)
|
|
|
|
self.assertTimingAlmostEqual(join.elapsed, 0.0)
|
|
|
|
self.assertEqual(p.is_alive(), True)
|
|
|
|
|
2013-10-17 09:56:18 -03:00
|
|
|
# XXX maybe terminating too soon causes the problems on Gentoo...
|
|
|
|
time.sleep(1)
|
|
|
|
|
2017-07-18 12:34:23 -03:00
|
|
|
meth(p)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2013-10-12 20:49:27 -03:00
|
|
|
if hasattr(signal, 'alarm'):
|
2013-10-17 06:38:37 -03:00
|
|
|
# On the Gentoo buildbot waitpid() often seems to block forever.
|
2013-10-17 09:56:18 -03:00
|
|
|
# We use alarm() to interrupt it if it blocks for too long.
|
2013-10-12 20:49:27 -03:00
|
|
|
def handler(*args):
|
2013-10-15 12:48:51 -03:00
|
|
|
raise RuntimeError('join took too long: %s' % p)
|
2013-10-12 20:49:27 -03:00
|
|
|
old_handler = signal.signal(signal.SIGALRM, handler)
|
|
|
|
try:
|
|
|
|
signal.alarm(10)
|
|
|
|
self.assertEqual(join(), None)
|
|
|
|
finally:
|
2013-10-17 10:24:06 -03:00
|
|
|
signal.alarm(0)
|
2013-10-12 20:49:27 -03:00
|
|
|
signal.signal(signal.SIGALRM, old_handler)
|
|
|
|
else:
|
|
|
|
self.assertEqual(join(), None)
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
self.assertTimingAlmostEqual(join.elapsed, 0.0)
|
|
|
|
|
|
|
|
self.assertEqual(p.is_alive(), False)
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertNotIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
p.join()
|
|
|
|
|
2017-07-18 12:34:23 -03:00
|
|
|
return p.exitcode
|
|
|
|
|
|
|
|
def test_terminate(self):
|
|
|
|
exitcode = self._kill_process(multiprocessing.Process.terminate)
|
2023-09-28 21:41:12 -03:00
|
|
|
self.assertEqual(exitcode, -signal.SIGTERM)
|
2017-07-18 12:34:23 -03:00
|
|
|
|
|
|
|
def test_kill(self):
|
|
|
|
exitcode = self._kill_process(multiprocessing.Process.kill)
|
2017-06-12 10:28:19 -03:00
|
|
|
if os.name != 'nt':
|
2017-07-18 12:34:23 -03:00
|
|
|
self.assertEqual(exitcode, -signal.SIGKILL)
|
2023-09-28 21:41:12 -03:00
|
|
|
else:
|
|
|
|
self.assertEqual(exitcode, -signal.SIGTERM)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
def test_cpu_count(self):
|
|
|
|
try:
|
|
|
|
cpus = multiprocessing.cpu_count()
|
|
|
|
except NotImplementedError:
|
|
|
|
cpus = 1
|
|
|
|
self.assertTrue(type(cpus) is int)
|
|
|
|
self.assertTrue(cpus >= 1)
|
|
|
|
|
|
|
|
def test_active_children(self):
|
|
|
|
self.assertEqual(type(self.active_children()), list)
|
|
|
|
|
|
|
|
p = self.Process(target=time.sleep, args=(DELTA,))
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertNotIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2011-09-09 15:26:57 -03:00
|
|
|
p.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
p.start()
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
p.join()
|
2010-01-18 20:09:57 -04:00
|
|
|
self.assertNotIn(p, self.active_children())
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_recursion(cls, wconn, id):
|
2008-06-11 13:44:04 -03:00
|
|
|
wconn.send(id)
|
|
|
|
if len(id) < 2:
|
|
|
|
for i in range(2):
|
2010-11-02 20:50:11 -03:00
|
|
|
p = cls.Process(
|
|
|
|
target=cls._test_recursion, args=(wconn, id+[i])
|
2008-06-11 13:44:04 -03:00
|
|
|
)
|
|
|
|
p.start()
|
|
|
|
p.join()
|
|
|
|
|
|
|
|
def test_recursion(self):
|
|
|
|
rconn, wconn = self.Pipe(duplex=False)
|
|
|
|
self._test_recursion(wconn, [])
|
|
|
|
|
|
|
|
time.sleep(DELTA)
|
|
|
|
result = []
|
|
|
|
while rconn.poll():
|
|
|
|
result.append(rconn.recv())
|
|
|
|
|
|
|
|
expected = [
|
|
|
|
[],
|
|
|
|
[0],
|
|
|
|
[0, 0],
|
|
|
|
[0, 1],
|
|
|
|
[1],
|
|
|
|
[1, 0],
|
|
|
|
[1, 1]
|
|
|
|
]
|
|
|
|
self.assertEqual(result, expected)
|
|
|
|
|
2011-06-06 14:35:31 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_sentinel(cls, event):
|
|
|
|
event.wait(10.0)
|
|
|
|
|
|
|
|
def test_sentinel(self):
|
|
|
|
if self.TYPE == "threads":
|
2013-12-08 02:20:35 -04:00
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
2011-06-06 14:35:31 -03:00
|
|
|
event = self.Event()
|
|
|
|
p = self.Process(target=self._test_sentinel, args=(event,))
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
p.sentinel
|
|
|
|
p.start()
|
|
|
|
self.addCleanup(p.join)
|
|
|
|
sentinel = p.sentinel
|
|
|
|
self.assertIsInstance(sentinel, int)
|
|
|
|
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
|
|
|
|
event.set()
|
|
|
|
p.join()
|
2013-08-14 11:35:41 -03:00
|
|
|
self.assertTrue(wait_for_handle(sentinel, timeout=1))
|
2011-06-06 14:35:31 -03:00
|
|
|
|
2017-06-24 14:22:23 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_close(cls, rc=0, q=None):
|
|
|
|
if q is not None:
|
|
|
|
q.get()
|
|
|
|
sys.exit(rc)
|
|
|
|
|
|
|
|
def test_close(self):
|
|
|
|
if self.TYPE == "threads":
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
q = self.Queue()
|
|
|
|
p = self.Process(target=self._test_close, kwargs={'q': q})
|
|
|
|
p.daemon = True
|
|
|
|
p.start()
|
|
|
|
self.assertEqual(p.is_alive(), True)
|
|
|
|
# Child is still alive, cannot close
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
p.close()
|
|
|
|
|
|
|
|
q.put(None)
|
|
|
|
p.join()
|
|
|
|
self.assertEqual(p.is_alive(), False)
|
|
|
|
self.assertEqual(p.exitcode, 0)
|
|
|
|
p.close()
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
p.is_alive()
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
p.join()
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
p.terminate()
|
|
|
|
p.close()
|
|
|
|
|
|
|
|
wr = weakref.ref(p)
|
|
|
|
del p
|
|
|
|
gc.collect()
|
|
|
|
self.assertIs(wr(), None)
|
|
|
|
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(q)
|
|
|
|
|
2023-09-05 11:56:30 -03:00
|
|
|
@support.requires_resource('walltime')
|
2017-06-12 10:28:19 -03:00
|
|
|
def test_many_processes(self):
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
|
|
|
|
sm = multiprocessing.get_start_method()
|
|
|
|
N = 5 if sm == 'spawn' else 100
|
|
|
|
|
|
|
|
# Try to overwhelm the forkserver loop with events
|
|
|
|
procs = [self.Process(target=self._test_sleep, args=(0.01,))
|
|
|
|
for i in range(N)]
|
|
|
|
for p in procs:
|
|
|
|
p.start()
|
|
|
|
for p in procs:
|
2017-09-15 10:55:31 -03:00
|
|
|
join_process(p)
|
2017-06-12 10:28:19 -03:00
|
|
|
for p in procs:
|
|
|
|
self.assertEqual(p.exitcode, 0)
|
|
|
|
|
2017-07-18 12:34:23 -03:00
|
|
|
procs = [self.Process(target=self._sleep_some)
|
2017-06-12 10:28:19 -03:00
|
|
|
for i in range(N)]
|
|
|
|
for p in procs:
|
|
|
|
p.start()
|
|
|
|
time.sleep(0.001) # let the children start...
|
|
|
|
for p in procs:
|
|
|
|
p.terminate()
|
|
|
|
for p in procs:
|
2017-09-15 10:55:31 -03:00
|
|
|
join_process(p)
|
2017-06-12 10:28:19 -03:00
|
|
|
if os.name != 'nt':
|
2017-10-02 12:27:34 -03:00
|
|
|
exitcodes = [-signal.SIGTERM]
|
|
|
|
if sys.platform == 'darwin':
|
|
|
|
# bpo-31510: On macOS, killing a freshly started process with
|
|
|
|
# SIGTERM sometimes kills the process with SIGKILL.
|
|
|
|
exitcodes.append(-signal.SIGKILL)
|
2017-06-12 10:28:19 -03:00
|
|
|
for p in procs:
|
2017-10-02 12:27:34 -03:00
|
|
|
self.assertIn(p.exitcode, exitcodes)
|
2017-06-12 10:28:19 -03:00
|
|
|
|
2017-06-28 07:29:08 -03:00
|
|
|
def test_lose_target_ref(self):
|
|
|
|
c = DummyCallable()
|
|
|
|
wr = weakref.ref(c)
|
|
|
|
q = self.Queue()
|
|
|
|
p = self.Process(target=c, args=(q, c))
|
|
|
|
del c
|
|
|
|
p.start()
|
|
|
|
p.join()
|
2021-08-29 08:04:40 -03:00
|
|
|
gc.collect() # For PyPy or other GCs.
|
2017-06-28 07:29:08 -03:00
|
|
|
self.assertIs(wr(), None)
|
|
|
|
self.assertEqual(q.get(), 5)
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(q)
|
2017-06-28 07:29:08 -03:00
|
|
|
|
2017-07-22 08:22:54 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_child_fd_inflation(self, evt, q):
|
2020-08-07 12:18:38 -03:00
|
|
|
q.put(os_helper.fd_count())
|
2017-07-22 08:22:54 -03:00
|
|
|
evt.wait()
|
|
|
|
|
|
|
|
def test_child_fd_inflation(self):
|
|
|
|
# Number of fds in child processes should not grow with the
|
|
|
|
# number of running children.
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
|
|
|
|
sm = multiprocessing.get_start_method()
|
|
|
|
if sm == 'fork':
|
|
|
|
# The fork method by design inherits all fds from the parent,
|
|
|
|
# trying to go against it is a lost battle
|
|
|
|
self.skipTest('test not appropriate for {}'.format(sm))
|
|
|
|
|
|
|
|
N = 5
|
|
|
|
evt = self.Event()
|
|
|
|
q = self.Queue()
|
|
|
|
|
|
|
|
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
|
|
|
|
for i in range(N)]
|
|
|
|
for p in procs:
|
|
|
|
p.start()
|
|
|
|
|
|
|
|
try:
|
|
|
|
fd_counts = [q.get() for i in range(N)]
|
|
|
|
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
|
|
|
|
|
|
|
|
finally:
|
|
|
|
evt.set()
|
|
|
|
for p in procs:
|
|
|
|
p.join()
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(q)
|
2017-06-28 07:29:08 -03:00
|
|
|
|
2017-08-16 15:53:28 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_wait_for_threads(self, evt):
|
|
|
|
def func1():
|
|
|
|
time.sleep(0.5)
|
|
|
|
evt.set()
|
|
|
|
|
|
|
|
def func2():
|
|
|
|
time.sleep(20)
|
|
|
|
evt.clear()
|
|
|
|
|
|
|
|
threading.Thread(target=func1).start()
|
|
|
|
threading.Thread(target=func2, daemon=True).start()
|
|
|
|
|
|
|
|
def test_wait_for_threads(self):
|
|
|
|
# A child process should wait for non-daemonic threads to end
|
|
|
|
# before exiting
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
|
|
|
|
evt = self.Event()
|
|
|
|
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
|
|
|
|
proc.start()
|
|
|
|
proc.join()
|
|
|
|
self.assertTrue(evt.is_set())
|
|
|
|
|
2017-10-22 06:40:31 -03:00
|
|
|
@classmethod
|
2018-03-11 15:21:38 -03:00
|
|
|
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
|
|
|
|
for stream_name, action in break_std_streams.items():
|
|
|
|
if action == 'close':
|
|
|
|
stream = io.StringIO()
|
|
|
|
stream.close()
|
|
|
|
else:
|
|
|
|
assert action == 'remove'
|
|
|
|
stream = None
|
|
|
|
setattr(sys, stream_name, None)
|
2017-10-22 06:40:31 -03:00
|
|
|
evt.set()
|
|
|
|
|
2018-03-11 15:21:38 -03:00
|
|
|
def test_error_on_stdio_flush_1(self):
|
|
|
|
# Check that Process works with broken standard streams
|
2017-10-22 06:40:31 -03:00
|
|
|
streams = [io.StringIO(), None]
|
|
|
|
streams[0].close()
|
|
|
|
for stream_name in ('stdout', 'stderr'):
|
|
|
|
for stream in streams:
|
|
|
|
old_stream = getattr(sys, stream_name)
|
|
|
|
setattr(sys, stream_name, stream)
|
|
|
|
try:
|
|
|
|
evt = self.Event()
|
|
|
|
proc = self.Process(target=self._test_error_on_stdio_flush,
|
|
|
|
args=(evt,))
|
|
|
|
proc.start()
|
|
|
|
proc.join()
|
|
|
|
self.assertTrue(evt.is_set())
|
2018-03-11 15:21:38 -03:00
|
|
|
self.assertEqual(proc.exitcode, 0)
|
|
|
|
finally:
|
|
|
|
setattr(sys, stream_name, old_stream)
|
|
|
|
|
|
|
|
def test_error_on_stdio_flush_2(self):
|
|
|
|
# Same as test_error_on_stdio_flush_1(), but standard streams are
|
|
|
|
# broken by the child process
|
|
|
|
for stream_name in ('stdout', 'stderr'):
|
|
|
|
for action in ('close', 'remove'):
|
|
|
|
old_stream = getattr(sys, stream_name)
|
|
|
|
try:
|
|
|
|
evt = self.Event()
|
|
|
|
proc = self.Process(target=self._test_error_on_stdio_flush,
|
|
|
|
args=(evt, {stream_name: action}))
|
|
|
|
proc.start()
|
|
|
|
proc.join()
|
|
|
|
self.assertTrue(evt.is_set())
|
|
|
|
self.assertEqual(proc.exitcode, 0)
|
2017-10-22 06:40:31 -03:00
|
|
|
finally:
|
|
|
|
setattr(sys, stream_name, old_stream)
|
|
|
|
|
2017-11-03 09:34:22 -03:00
|
|
|
@classmethod
|
|
|
|
def _sleep_and_set_event(self, evt, delay=0.0):
|
|
|
|
time.sleep(delay)
|
|
|
|
evt.set()
|
|
|
|
|
|
|
|
def check_forkserver_death(self, signum):
|
|
|
|
# bpo-31308: if the forkserver process has died, we should still
|
|
|
|
# be able to create and run new Process instances (the forkserver
|
|
|
|
# is implicitly restarted).
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
|
|
|
sm = multiprocessing.get_start_method()
|
|
|
|
if sm != 'forkserver':
|
|
|
|
# The fork method by design inherits all fds from the parent,
|
|
|
|
# trying to go against it is a lost battle
|
|
|
|
self.skipTest('test not appropriate for {}'.format(sm))
|
|
|
|
|
|
|
|
from multiprocessing.forkserver import _forkserver
|
|
|
|
_forkserver.ensure_running()
|
|
|
|
|
2018-07-04 06:49:41 -03:00
|
|
|
# First process sleeps 500 ms
|
|
|
|
delay = 0.5
|
|
|
|
|
2017-11-03 09:34:22 -03:00
|
|
|
evt = self.Event()
|
2018-07-04 06:49:41 -03:00
|
|
|
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
|
2017-11-03 09:34:22 -03:00
|
|
|
proc.start()
|
|
|
|
|
|
|
|
pid = _forkserver._forkserver_pid
|
|
|
|
os.kill(pid, signum)
|
2018-07-04 06:49:41 -03:00
|
|
|
# give time to the fork server to die and time to proc to complete
|
|
|
|
time.sleep(delay * 2.0)
|
2017-11-03 09:34:22 -03:00
|
|
|
|
|
|
|
evt2 = self.Event()
|
|
|
|
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
|
|
|
|
proc2.start()
|
|
|
|
proc2.join()
|
|
|
|
self.assertTrue(evt2.is_set())
|
|
|
|
self.assertEqual(proc2.exitcode, 0)
|
|
|
|
|
|
|
|
proc.join()
|
|
|
|
self.assertTrue(evt.is_set())
|
|
|
|
self.assertIn(proc.exitcode, (0, 255))
|
|
|
|
|
|
|
|
def test_forkserver_sigint(self):
|
|
|
|
# Catchable signal
|
|
|
|
self.check_forkserver_death(signal.SIGINT)
|
|
|
|
|
|
|
|
def test_forkserver_sigkill(self):
|
|
|
|
# Uncatchable signal
|
|
|
|
if os.name != 'nt':
|
|
|
|
self.check_forkserver_death(signal.SIGKILL)
|
|
|
|
|
2017-08-16 15:53:28 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _UpperCaser(multiprocessing.Process):
|
|
|
|
|
|
|
|
def __init__(self):
|
|
|
|
multiprocessing.Process.__init__(self)
|
|
|
|
self.child_conn, self.parent_conn = multiprocessing.Pipe()
|
|
|
|
|
|
|
|
def run(self):
|
|
|
|
self.parent_conn.close()
|
|
|
|
for s in iter(self.child_conn.recv, None):
|
|
|
|
self.child_conn.send(s.upper())
|
|
|
|
self.child_conn.close()
|
|
|
|
|
|
|
|
def submit(self, s):
|
|
|
|
assert type(s) is str
|
|
|
|
self.parent_conn.send(s)
|
|
|
|
return self.parent_conn.recv()
|
|
|
|
|
|
|
|
def stop(self):
|
|
|
|
self.parent_conn.send(None)
|
|
|
|
self.parent_conn.close()
|
|
|
|
self.child_conn.close()
|
|
|
|
|
|
|
|
class _TestSubclassingProcess(BaseTestCase):
|
|
|
|
|
|
|
|
ALLOWED_TYPES = ('processes',)
|
|
|
|
|
|
|
|
def test_subclassing(self):
|
|
|
|
uppercaser = _UpperCaser()
|
2011-09-09 15:26:57 -03:00
|
|
|
uppercaser.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
uppercaser.start()
|
|
|
|
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
|
|
|
|
self.assertEqual(uppercaser.submit('world'), 'WORLD')
|
|
|
|
uppercaser.stop()
|
|
|
|
uppercaser.join()
|
|
|
|
|
2012-01-27 05:52:37 -04:00
|
|
|
def test_stderr_flush(self):
|
|
|
|
# sys.stderr is flushed at process shutdown (issue #13812)
|
|
|
|
if self.TYPE == "threads":
|
2013-12-08 02:20:35 -04:00
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
2012-01-27 05:52:37 -04:00
|
|
|
|
2020-08-07 12:18:38 -03:00
|
|
|
testfn = os_helper.TESTFN
|
|
|
|
self.addCleanup(os_helper.unlink, testfn)
|
2012-01-27 05:52:37 -04:00
|
|
|
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
|
|
|
|
proc.start()
|
|
|
|
proc.join()
|
2021-04-03 21:01:23 -03:00
|
|
|
with open(testfn, encoding="utf-8") as f:
|
2012-01-27 05:52:37 -04:00
|
|
|
err = f.read()
|
|
|
|
# The whole traceback was printed
|
|
|
|
self.assertIn("ZeroDivisionError", err)
|
|
|
|
self.assertIn("test_multiprocessing.py", err)
|
|
|
|
self.assertIn("1/0 # MARKER", err)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_stderr_flush(cls, testfn):
|
2016-03-25 05:29:50 -03:00
|
|
|
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
|
2021-04-03 21:01:23 -03:00
|
|
|
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
|
2012-01-27 05:52:37 -04:00
|
|
|
1/0 # MARKER
|
|
|
|
|
|
|
|
|
2012-06-06 15:04:57 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_sys_exit(cls, reason, testfn):
|
2016-03-25 05:29:50 -03:00
|
|
|
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
|
2021-04-03 21:01:23 -03:00
|
|
|
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
|
2012-06-06 15:04:57 -03:00
|
|
|
sys.exit(reason)
|
|
|
|
|
|
|
|
def test_sys_exit(self):
|
|
|
|
# See Issue 13854
|
|
|
|
if self.TYPE == 'threads':
|
2013-12-08 02:20:35 -04:00
|
|
|
self.skipTest('test not appropriate for {}'.format(self.TYPE))
|
2012-06-06 15:04:57 -03:00
|
|
|
|
2020-08-07 12:18:38 -03:00
|
|
|
testfn = os_helper.TESTFN
|
|
|
|
self.addCleanup(os_helper.unlink, testfn)
|
2012-06-06 15:04:57 -03:00
|
|
|
|
2016-03-25 05:29:50 -03:00
|
|
|
for reason in (
|
|
|
|
[1, 2, 3],
|
|
|
|
'ignore this',
|
|
|
|
):
|
2012-06-06 15:04:57 -03:00
|
|
|
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
|
|
|
|
p.daemon = True
|
|
|
|
p.start()
|
2017-09-15 10:55:31 -03:00
|
|
|
join_process(p)
|
2016-03-25 05:29:50 -03:00
|
|
|
self.assertEqual(p.exitcode, 1)
|
2012-06-06 15:04:57 -03:00
|
|
|
|
2021-04-03 21:01:23 -03:00
|
|
|
with open(testfn, encoding="utf-8") as f:
|
2016-03-25 05:29:50 -03:00
|
|
|
content = f.read()
|
|
|
|
self.assertEqual(content.rstrip(), str(reason))
|
|
|
|
|
|
|
|
os.unlink(testfn)
|
2012-06-06 15:04:57 -03:00
|
|
|
|
2020-02-21 05:33:04 -04:00
|
|
|
cases = [
|
|
|
|
((True,), 1),
|
|
|
|
((False,), 0),
|
|
|
|
((8,), 8),
|
|
|
|
((None,), 0),
|
|
|
|
((), 0),
|
|
|
|
]
|
|
|
|
|
|
|
|
for args, expected in cases:
|
|
|
|
with self.subTest(args=args):
|
|
|
|
p = self.Process(target=sys.exit, args=args)
|
|
|
|
p.daemon = True
|
|
|
|
p.start()
|
|
|
|
join_process(p)
|
|
|
|
self.assertEqual(p.exitcode, expected)
|
2012-06-06 15:04:57 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
def queue_empty(q):
|
|
|
|
if hasattr(q, 'empty'):
|
|
|
|
return q.empty()
|
|
|
|
else:
|
|
|
|
return q.qsize() == 0
|
|
|
|
|
|
|
|
def queue_full(q, maxsize):
|
|
|
|
if hasattr(q, 'full'):
|
|
|
|
return q.full()
|
|
|
|
else:
|
|
|
|
return q.qsize() == maxsize
|
|
|
|
|
|
|
|
|
|
|
|
class _TestQueue(BaseTestCase):
|
|
|
|
|
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_put(cls, queue, child_can_start, parent_can_continue):
|
2008-06-11 13:44:04 -03:00
|
|
|
child_can_start.wait()
|
|
|
|
for i in range(6):
|
|
|
|
queue.get()
|
|
|
|
parent_can_continue.set()
|
|
|
|
|
|
|
|
def test_put(self):
|
|
|
|
MAXSIZE = 6
|
|
|
|
queue = self.Queue(maxsize=MAXSIZE)
|
|
|
|
child_can_start = self.Event()
|
|
|
|
parent_can_continue = self.Event()
|
|
|
|
|
|
|
|
proc = self.Process(
|
|
|
|
target=self._test_put,
|
|
|
|
args=(queue, child_can_start, parent_can_continue)
|
|
|
|
)
|
2008-08-19 16:17:39 -03:00
|
|
|
proc.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
proc.start()
|
|
|
|
|
|
|
|
self.assertEqual(queue_empty(queue), True)
|
|
|
|
self.assertEqual(queue_full(queue, MAXSIZE), False)
|
|
|
|
|
|
|
|
queue.put(1)
|
|
|
|
queue.put(2, True)
|
|
|
|
queue.put(3, True, None)
|
|
|
|
queue.put(4, False)
|
|
|
|
queue.put(5, False, None)
|
|
|
|
queue.put_nowait(6)
|
|
|
|
|
|
|
|
# the values may be in buffer but not yet in pipe so sleep a bit
|
|
|
|
time.sleep(DELTA)
|
|
|
|
|
|
|
|
self.assertEqual(queue_empty(queue), False)
|
|
|
|
self.assertEqual(queue_full(queue, MAXSIZE), True)
|
|
|
|
|
|
|
|
put = TimingWrapper(queue.put)
|
|
|
|
put_nowait = TimingWrapper(queue.put_nowait)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put, 7, False)
|
|
|
|
self.assertTimingAlmostEqual(put.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put, 7, False, None)
|
|
|
|
self.assertTimingAlmostEqual(put.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put_nowait, 7)
|
|
|
|
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
|
|
|
|
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
|
|
|
|
self.assertTimingAlmostEqual(put.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
|
|
|
|
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
|
|
|
|
|
|
|
|
child_can_start.set()
|
|
|
|
parent_can_continue.wait()
|
|
|
|
|
|
|
|
self.assertEqual(queue_empty(queue), True)
|
|
|
|
self.assertEqual(queue_full(queue, MAXSIZE), False)
|
|
|
|
|
|
|
|
proc.join()
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(queue)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_get(cls, queue, child_can_start, parent_can_continue):
|
2008-06-11 13:44:04 -03:00
|
|
|
child_can_start.wait()
|
2008-06-16 17:57:14 -03:00
|
|
|
#queue.put(1)
|
2008-06-11 13:44:04 -03:00
|
|
|
queue.put(2)
|
|
|
|
queue.put(3)
|
|
|
|
queue.put(4)
|
|
|
|
queue.put(5)
|
|
|
|
parent_can_continue.set()
|
|
|
|
|
|
|
|
def test_get(self):
|
|
|
|
queue = self.Queue()
|
|
|
|
child_can_start = self.Event()
|
|
|
|
parent_can_continue = self.Event()
|
|
|
|
|
|
|
|
proc = self.Process(
|
|
|
|
target=self._test_get,
|
|
|
|
args=(queue, child_can_start, parent_can_continue)
|
|
|
|
)
|
2008-08-19 16:17:39 -03:00
|
|
|
proc.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
proc.start()
|
|
|
|
|
|
|
|
self.assertEqual(queue_empty(queue), True)
|
|
|
|
|
|
|
|
child_can_start.set()
|
|
|
|
parent_can_continue.wait()
|
|
|
|
|
|
|
|
time.sleep(DELTA)
|
|
|
|
self.assertEqual(queue_empty(queue), False)
|
|
|
|
|
2008-06-16 17:57:14 -03:00
|
|
|
# Hangs unexpectedly, remove for now
|
|
|
|
#self.assertEqual(queue.get(), 1)
|
2008-06-11 13:44:04 -03:00
|
|
|
self.assertEqual(queue.get(True, None), 2)
|
|
|
|
self.assertEqual(queue.get(True), 3)
|
|
|
|
self.assertEqual(queue.get(timeout=1), 4)
|
|
|
|
self.assertEqual(queue.get_nowait(), 5)
|
|
|
|
|
|
|
|
self.assertEqual(queue_empty(queue), True)
|
|
|
|
|
|
|
|
get = TimingWrapper(queue.get)
|
|
|
|
get_nowait = TimingWrapper(queue.get_nowait)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get, False)
|
|
|
|
self.assertTimingAlmostEqual(get.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get, False, None)
|
|
|
|
self.assertTimingAlmostEqual(get.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get_nowait)
|
|
|
|
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
|
|
|
|
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
|
|
|
|
self.assertTimingAlmostEqual(get.elapsed, 0)
|
|
|
|
|
|
|
|
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
|
|
|
|
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
|
|
|
|
|
|
|
|
proc.join()
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(queue)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_fork(cls, queue):
|
2008-06-11 13:44:04 -03:00
|
|
|
for i in range(10, 20):
|
|
|
|
queue.put(i)
|
|
|
|
# note that at this point the items may only be buffered, so the
|
|
|
|
# process cannot shutdown until the feeder thread has finished
|
|
|
|
# pushing items onto the pipe.
|
|
|
|
|
|
|
|
def test_fork(self):
|
|
|
|
# Old versions of Queue would fail to create a new feeder
|
|
|
|
# thread for a forked process if the original process had its
|
|
|
|
# own feeder thread. This test checks that this no longer
|
|
|
|
# happens.
|
|
|
|
|
|
|
|
queue = self.Queue()
|
|
|
|
|
|
|
|
# put items on queue so that main process starts a feeder thread
|
|
|
|
for i in range(10):
|
|
|
|
queue.put(i)
|
|
|
|
|
|
|
|
# wait to make sure thread starts before we fork a new process
|
|
|
|
time.sleep(DELTA)
|
|
|
|
|
|
|
|
# fork process
|
|
|
|
p = self.Process(target=self._test_fork, args=(queue,))
|
2011-09-09 15:26:57 -03:00
|
|
|
p.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
p.start()
|
|
|
|
|
|
|
|
# check that all expected items are in the queue
|
|
|
|
for i in range(20):
|
|
|
|
self.assertEqual(queue.get(), i)
|
|
|
|
self.assertRaises(pyqueue.Empty, queue.get, False)
|
|
|
|
|
|
|
|
p.join()
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(queue)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
def test_qsize(self):
|
|
|
|
q = self.Queue()
|
|
|
|
try:
|
|
|
|
self.assertEqual(q.qsize(), 0)
|
|
|
|
except NotImplementedError:
|
2013-12-08 02:20:35 -04:00
|
|
|
self.skipTest('qsize method not implemented')
|
2008-06-11 13:44:04 -03:00
|
|
|
q.put(1)
|
|
|
|
self.assertEqual(q.qsize(), 1)
|
|
|
|
q.put(5)
|
|
|
|
self.assertEqual(q.qsize(), 2)
|
|
|
|
q.get()
|
|
|
|
self.assertEqual(q.qsize(), 1)
|
|
|
|
q.get()
|
|
|
|
self.assertEqual(q.qsize(), 0)
|
2017-07-24 19:33:56 -03:00
|
|
|
close_queue(q)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _test_task_done(cls, q):
|
2008-06-11 13:44:04 -03:00
|
|
|
for obj in iter(q.get, None):
|
|
|
|
time.sleep(DELTA)
|
|
|
|
q.task_done()
|
|
|
|
|
|
|
|
def test_task_done(self):
|
|
|
|
queue = self.JoinableQueue()
|
|
|
|
|
|
|
|
workers = [self.Process(target=self._test_task_done, args=(queue,))
|
|
|
|
for i in range(4)]
|
|
|
|
|
|
|
|
for p in workers:
|
2011-09-09 15:26:57 -03:00
|
|
|
p.daemon = True
|
2008-06-11 13:44:04 -03:00
|
|
|
p.start()
|
|
|
|
|
|
|
|
for i in range(10):
|
|
|
|
queue.put(i)
|
|
|
|
|
|
|
|
queue.join()
|
|
|
|
|
|
|
|
for p in workers:
|
|
|
|
queue.put(None)
|
|
|
|
|
|
|
|
for p in workers:
|
|
|
|
p.join()
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(queue)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2015-03-06 17:32:54 -04:00
|
|
|
    def test_no_import_lock_contention(self):
        # Issue #22853: creating and using a Queue at module import time
        # must not deadlock against the import lock when the feeder thread
        # starts.  A deadlock shows up as the 3-second get() timing out.
        with os_helper.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w', encoding="utf-8") as f:
                f.write("""if 1:
                    import multiprocessing

                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)

            with import_helper.DirsOnSysPath(os.getcwd()):
                try:
                    # Importing the module performs the queue round-trip above.
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")
2013-04-17 08:12:27 -03:00
|
|
|
def test_timeout(self):
|
|
|
|
q = multiprocessing.Queue()
|
2018-12-17 04:36:36 -04:00
|
|
|
start = time.monotonic()
|
2015-02-05 09:25:05 -04:00
|
|
|
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
|
2018-12-17 04:36:36 -04:00
|
|
|
delta = time.monotonic() - start
|
2018-08-02 21:09:00 -03:00
|
|
|
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
|
|
|
|
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
|
|
|
|
# failed because the delta was only 135.8 ms.
|
|
|
|
self.assertGreaterEqual(delta, 0.100)
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(q)
|
2013-04-17 08:12:27 -03:00
|
|
|
|
2017-05-25 11:22:57 -03:00
|
|
|
    def test_queue_feeder_donot_stop_onexc(self):
        """The feeder thread must survive a pickling failure and keep
        feeding subsequent items."""
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            # Pickling this object always fails in the feeder thread.
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            # The next item must still make it through the feeder.
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)

        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            # Must not block: the failed item should have freed its slot.
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)
2017-05-25 11:22:57 -03:00
|
|
|
|
2018-01-05 06:15:54 -04:00
|
|
|
    def test_queue_feeder_on_queue_feeder_error(self):
        """A Queue subclass can intercept feeder-thread serialization
        failures via the _on_queue_feeder_error hook."""
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
2018-10-13 06:26:09 -03:00
|
|
|
|
|
|
|
def test_closed_queue_put_get_exceptions(self):
|
|
|
|
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
|
|
|
|
q.close()
|
|
|
|
with self.assertRaisesRegex(ValueError, 'is closed'):
|
|
|
|
q.put('foo')
|
|
|
|
with self.assertRaisesRegex(ValueError, 'is closed'):
|
|
|
|
q.get()
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestLock(BaseTestCase):
    """Behavioural checks for Lock and RLock."""

    def test_lock(self):
        mutex = self.Lock()
        # First acquire succeeds; a second, non-blocking attempt fails.
        self.assertEqual(mutex.acquire(), True)
        self.assertEqual(mutex.acquire(False), False)
        self.assertEqual(mutex.release(), None)
        # Releasing an unheld lock is an error.
        self.assertRaises((ValueError, threading.ThreadError), mutex.release)

    def test_rlock(self):
        mutex = self.RLock()
        # A reentrant lock may be acquired repeatedly by its owner...
        for _ in range(3):
            self.assertEqual(mutex.acquire(), True)
        # ...and must be released the same number of times.
        for _ in range(3):
            self.assertEqual(mutex.release(), None)
        # One release too many is an error.
        self.assertRaises((AssertionError, RuntimeError), mutex.release)

    def test_lock_context(self):
        # A Lock must also work as a context manager.
        with self.Lock():
            pass
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
class _TestSemaphore(BaseTestCase):
    """Tests for Semaphore and BoundedSemaphore counting and timeouts."""

    def _test_semaphore(self, sem):
        # `sem` starts at 2; walk the counter down to 0 and back up,
        # checking its value at each step where get_value is available.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        # Non-blocking acquire on an exhausted semaphore fails.
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        # A plain Semaphore may be released above its initial value.
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        # Timing semantics of acquire(block, timeout) on an empty semaphore.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        # Non-blocking acquire returns immediately, whatever the timeout.
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        # Blocking acquire honours the timeout before giving up.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
|
|
|
|
|
|
|
class _TestCondition(BaseTestCase):
    """Tests for Condition variables: notify, notify_all, wait, wait_for."""

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Child helper: announce that we are about to sleep, wait on the
        # condition (optionally with a timeout), then announce wake-up.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def assertReachesEventually(self, func, value):
        # Poll until func() reaches `value` (or raises NotImplementedError),
        # allow a final settling delay, then make the definitive assertion.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        # One process and one thread sleep on the condition; two notify()
        # calls must wake exactly one of them each.
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        # First batch of waiters times out on its own; the second batch is
        # released all at once by notify_all().
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()

        # check they have all woken
        self.assertReachesEventually(lambda: get_value(woken), 6)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_notify_n(self):
        # notify(n=...) must wake at most n waiters, and notifying with no
        # waiters left must be a no-op.
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake some of them up
        cond.acquire()
        cond.notify(n=2)
        cond.release()

        # check 2 have woken
        self.assertReachesEventually(lambda: get_value(woken), 2)

        # wake the rest of them
        cond.acquire()
        cond.notify(n=4)
        cond.release()

        self.assertReachesEventually(lambda: get_value(woken), 6)

        # doesn't do anything more
        cond.acquire()
        cond.notify(n=3)
        cond.release()

        self.assertReturnsIfImplemented(6, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_timeout(self):
        # wait(timeout) with no notifier returns False after ~timeout.
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child: signal readiness via state, then wait_for the parent to
        # count state up to 4; exit non-zero on failure.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertEqual(p.exitcode, 0)

    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child: wait_for a predicate that never becomes true and record
        # success only if the timeout was actually honoured.
        sem.release()
        with cond:
            expected = 0.100
            dt = time.monotonic()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.monotonic() - dt
            if not result and (expected - CLOCK_RES) <= dt:
                success.value = True

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.010)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child: notify the waiting parent, then (on POSIX) interrupt the
        # parent's second wait with SIGINT.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        # wait() returns False on timeout, True when notified, and
        # propagates KeyboardInterrupt when interrupted by a signal.
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

        p.join()
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
class _TestEvent(BaseTestCase):
    """Tests for Event set/clear/wait semantics and repr()."""

    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's wait() unblocks.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A child process setting the event must unblock the parent's wait().
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

    def test_repr(self) -> None:
        # repr() differs between the raw Event and its manager proxy.
        event = self.Event()
        if self.TYPE == 'processes':
            self.assertRegex(repr(event), r"<Event at .* unset>")
            event.set()
            self.assertRegex(repr(event), r"<Event at .* set>")
            event.clear()
            self.assertRegex(repr(event), r"<Event at .* unset>")
        elif self.TYPE == 'manager':
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")
            event.set()
            self.assertRegex(repr(event), r"<EventProxy object, typeid 'Event' at .*")
|
|
|
|
2012-06-15 14:26:07 -03:00
|
|
|
# Tests for Barrier - adapted from tests in test/lock_tests.py
|
|
|
|
#
|
|
|
|
|
|
|
|
# Many of the tests for threading.Barrier use a list as an atomic
|
|
|
|
# counter: a value is appended to increment the counter, and the
|
|
|
|
# length of the list gives the value. We use the class DummyList
|
|
|
|
# for the same purpose.
|
|
|
|
|
|
|
|
class _DummyList(object):
|
|
|
|
|
|
|
|
def __init__(self):
|
|
|
|
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
|
|
|
|
lock = multiprocessing.Lock()
|
|
|
|
self.__setstate__((wrapper, lock))
|
|
|
|
self._lengthbuf[0] = 0
|
|
|
|
|
|
|
|
def __setstate__(self, state):
|
|
|
|
(self._wrapper, self._lock) = state
|
|
|
|
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
|
|
|
|
|
|
|
|
def __getstate__(self):
|
|
|
|
return (self._wrapper, self._lock)
|
|
|
|
|
|
|
|
def append(self, _):
|
|
|
|
with self._lock:
|
|
|
|
self._lengthbuf[0] += 1
|
|
|
|
|
|
|
|
def __len__(self):
|
|
|
|
with self._lock:
|
|
|
|
return self._lengthbuf[0]
|
|
|
|
|
|
|
|
def _wait():
|
|
|
|
# A crude wait/yield function not relying on synchronization primitives.
|
|
|
|
time.sleep(0.01)
|
|
|
|
|
|
|
|
|
|
|
|
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        # Shared counters tracking how many workers have started/finished.
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        # Gate that workers wait on before exiting (see task()).
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            # Join all workers so no stray processes outlive the Bunch.
            for p in threads:
                p.join()

        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        # Worker body: record start, run f, record finish, then hold at
        # the exit gate until released.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-wait until every worker has recorded its start.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        # Busy-wait until every worker has recorded its finish.
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        # Release workers held by wait_before_exit=True.
        self._can_exit.set()

    def close(self):
        # Join the workers now rather than waiting for garbage collection.
        self._finalizer()
2012-06-15 14:26:07 -03:00
|
|
|
|
|
|
|
class AppendTrue(object):
    """Picklable callable that appends True to a captured container.

    Used as a barrier `action` so its execution leaves visible evidence.
    """

    def __init__(self, obj):
        self.obj = obj

    def __call__(self):
        self.obj.append(True)
|
|
|
|
|
|
|
|
class _TestBarrier(BaseTestCase):
|
|
|
|
"""
|
|
|
|
Tests for Barrier objects.
|
|
|
|
"""
|
|
|
|
N = 5
|
2012-06-18 10:11:10 -03:00
|
|
|
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
|
2012-06-15 14:26:07 -03:00
|
|
|
|
|
|
|
def setUp(self):
|
|
|
|
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
|
|
|
|
|
|
|
|
def tearDown(self):
|
|
|
|
self.barrier.abort()
|
|
|
|
self.barrier = None
|
|
|
|
|
|
|
|
def DummyList(self):
|
|
|
|
if self.TYPE == 'threads':
|
|
|
|
return []
|
|
|
|
elif self.TYPE == 'manager':
|
|
|
|
return self.manager.list()
|
|
|
|
else:
|
|
|
|
return _DummyList()
|
|
|
|
|
|
|
|
def run_threads(self, f, args):
|
|
|
|
b = Bunch(self, f, args, self.N-1)
|
2017-06-28 06:21:52 -03:00
|
|
|
try:
|
|
|
|
f(*args)
|
|
|
|
b.wait_for_finished()
|
|
|
|
finally:
|
|
|
|
b.close()
|
2012-06-15 14:26:07 -03:00
|
|
|
|
|
|
|
    @classmethod
    def multipass(cls, barrier, results, n):
        # Cross the barrier `n` times in lockstep; on each pass assert that
        # no party has raced ahead of the others.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            # Nobody can have passed the next wait() yet.
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            # Everybody must have completed the first half of this pass.
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            # n_waiting is unavailable for some barrier implementations.
            pass
        assert not barrier.broken
|
|
|
|
def test_barrier(self, passes=1):
|
|
|
|
"""
|
|
|
|
Test that a barrier is passed in lockstep
|
|
|
|
"""
|
|
|
|
results = [self.DummyList(), self.DummyList()]
|
|
|
|
self.run_threads(self.multipass, (self.barrier, results, passes))
|
|
|
|
|
|
|
|
def test_barrier_10(self):
|
|
|
|
"""
|
|
|
|
Test that a barrier works for 10 consecutive runs
|
|
|
|
"""
|
|
|
|
return self.test_barrier(10)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_wait_return_f(cls, barrier, queue):
|
|
|
|
res = barrier.wait()
|
|
|
|
queue.put(res)
|
|
|
|
|
|
|
|
def test_wait_return(self):
|
|
|
|
"""
|
|
|
|
test the return value from barrier.wait
|
|
|
|
"""
|
|
|
|
queue = self.Queue()
|
|
|
|
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
|
|
|
|
results = [queue.get() for i in range(self.N)]
|
|
|
|
self.assertEqual(results.count(0), 1)
|
2017-07-24 21:40:55 -03:00
|
|
|
close_queue(queue)
|
2012-06-15 14:26:07 -03:00
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_action_f(cls, barrier, results):
|
|
|
|
barrier.wait()
|
|
|
|
if len(results) != 1:
|
|
|
|
raise RuntimeError
|
|
|
|
|
|
|
|
def test_action(self):
    """Test the 'action' callback."""
    results = self.DummyList()
    barrier = self.Barrier(self.N, action=AppendTrue(results))
    self.run_threads(self._test_action_f, (barrier, results))
    self.assertEqual(len(results), 1)
|
|
|
|
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
    try:
        idx = barrier.wait()
        if idx == cls.N // 2:
            # One designated thread breaks the barrier.
            raise RuntimeError
        barrier.wait()
        results1.append(True)
    except threading.BrokenBarrierError:
        # All other threads see the break.
        results2.append(True)
    except RuntimeError:
        barrier.abort()
|
|
|
|
def test_abort(self):
    """Test that an abort will put the barrier in a broken state."""
    results1 = self.DummyList()
    results2 = self.DummyList()
    self.run_threads(self._test_abort_f,
                     (self.barrier, results1, results2))
    # No thread passed the second wait; the N-1 survivors all saw the break.
    self.assertEqual(len(results1), 0)
    self.assertEqual(len(results2), self.N - 1)
    self.assertTrue(self.barrier.broken)
|
|
|
|
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
    idx = barrier.wait()
    if idx == cls.N // 2:
        # Wait until the other threads are all in the barrier.
        while barrier.n_waiting < cls.N - 1:
            time.sleep(0.001)
        barrier.reset()
    else:
        try:
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            # reset() releases the waiters with a broken-barrier error.
            results2.append(True)
    # Now, pass the barrier again.
    barrier.wait()
    results3.append(True)
|
|
|
|
def test_reset(self):
    """Test that a 'reset' on a barrier frees the waiting threads."""
    results1 = self.DummyList()
    results2 = self.DummyList()
    results3 = self.DummyList()
    self.run_threads(self._test_reset_f,
                     (self.barrier, results1, results2, results3))
    # Nobody completed the interrupted wait; everybody crossed afterwards.
    self.assertEqual(len(results1), 0)
    self.assertEqual(len(results2), self.N - 1)
    self.assertEqual(len(results3), self.N)
|
|
|
|
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
                            results1, results2, results3):
    try:
        idx = barrier.wait()
        if idx == cls.N // 2:
            raise RuntimeError
        barrier.wait()
        results1.append(True)
    except threading.BrokenBarrierError:
        results2.append(True)
    except RuntimeError:
        barrier.abort()
    # Synchronize and reset the barrier.  Must synchronize first so
    # that everyone has left it when we reset, and after so that no
    # one enters it before the reset.
    if barrier2.wait() == cls.N // 2:
        barrier.reset()
    barrier2.wait()
    barrier.wait()
    results3.append(True)
|
|
|
|
def test_abort_and_reset(self):
    """Test that a barrier can be reset after being broken."""
    results1 = self.DummyList()
    results2 = self.DummyList()
    results3 = self.DummyList()
    # Second barrier coordinates the reset of the first one.
    barrier2 = self.Barrier(self.N)

    self.run_threads(self._test_abort_and_reset_f,
                     (self.barrier, barrier2, results1, results2, results3))
    self.assertEqual(len(results1), 0)
    self.assertEqual(len(results2), self.N - 1)
    self.assertEqual(len(results3), self.N)
|
|
|
|
@classmethod
def _test_timeout_f(cls, barrier, results):
    idx = barrier.wait()
    if idx == cls.N // 2:
        # One thread is late!
        time.sleep(1.0)
    try:
        barrier.wait(0.5)
    except threading.BrokenBarrierError:
        # Every party (including the late one) records the break.
        results.append(True)
|
|
|
|
def test_timeout(self):
    """Test wait(timeout)."""
    results = self.DummyList()
    self.run_threads(self._test_timeout_f, (self.barrier, results))
    self.assertEqual(len(results), self.barrier.parties)
|
|
|
|
@classmethod
def _test_default_timeout_f(cls, barrier, results):
    idx = barrier.wait(cls.defaultTimeout)
    if idx == cls.N // 2:
        # One thread is later than the default timeout.
        time.sleep(1.0)
    try:
        barrier.wait()
    except threading.BrokenBarrierError:
        results.append(True)
|
|
|
|
def test_default_timeout(self):
    """Test the barrier's default timeout."""
    barrier = self.Barrier(self.N, timeout=0.5)
    results = self.DummyList()
    self.run_threads(self._test_default_timeout_f, (barrier, results))
    self.assertEqual(len(results), barrier.parties)
|
|
|
|
def test_single_thread(self):
    # A one-party barrier never blocks and is immediately reusable.
    b = self.Barrier(1)
    b.wait()
    b.wait()
|
|
|
|
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
    # Cross the barrier `passes` times, reporting the pass number after
    # each crossing; the lock serializes writes to the shared pipe end.
    for pass_no in range(passes):
        barrier.wait()
        with lock:
            conn.send(pass_no)
|
|
|
|
def test_thousand(self):
    """Reuse a single barrier for 1000 consecutive passes.

    self.N child processes each cross the barrier 1000 times and report
    the pass number; the parent checks that every reported number for a
    given round matches, proving no process ran ahead.
    """
    if self.TYPE == 'manager':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))
    passes = 1000
    lock = self.Lock()
    conn, child_conn = self.Pipe(False)
    # Fix: close both pipe ends at teardown so their file descriptors
    # are not leaked for the remainder of the test run.  Cleanups run
    # LIFO, so the joins registered below still happen first.
    self.addCleanup(conn.close)
    self.addCleanup(child_conn.close)
    for _ in range(self.N):
        p = self.Process(target=self._test_thousand_f,
                         args=(self.barrier, passes, child_conn, lock))
        p.start()
        self.addCleanup(p.join)

    for i in range(passes):
        for _ in range(self.N):
            self.assertEqual(conn.recv(), i)
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestValue(BaseTestCase):
    """Exercise multiprocessing's Value/RawValue shared-ctypes scalars."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, value set by the parent, value written by the child)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        # sharedctypes needs _ctypes; skip everything here otherwise.
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child process: overwrite each shared scalar with the third
        # element of the corresponding codes_values tuple.
        for shared, expected in zip(values, cls.codes_values):
            shared.value = expected[2]

    def test_value(self, raw=False):
        # Build the shared scalars through either the raw or the
        # lock-wrapped API.
        factory = self.RawValue if raw else self.Value
        values = [factory(code, value)
                  for code, value, _ in self.codes_values]

        # The parent sees the initial values...
        for shared, expected in zip(values, self.codes_values):
            self.assertEqual(shared.value, expected[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        # ...and the child's writes are visible after it exits.
        for shared, expected in zip(values, self.codes_values):
            self.assertEqual(shared.value, expected[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        v1 = self.Value('i', 5)
        lock1 = v1.get_lock()
        obj1 = v1.get_obj()

        # lock=None means "create a fresh lock", same as the default.
        v2 = self.Value('i', 5, lock=None)
        lock2 = v2.get_lock()
        obj2 = v2.get_obj()

        # An explicitly supplied lock must be the one get_lock() returns.
        lock = self.Lock()
        v3 = self.Value('i', 5, lock=lock)
        lock3 = v3.get_lock()
        obj3 = v3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw value without the wrapper methods.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        # Any other object passed as lock is rejected.
        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
class _TestArray(BaseTestCase):
    """Exercise multiprocessing's Array/RawArray shared-ctypes sequences."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # Child process: turn seq into its running (prefix) sums in place.
        for idx in range(1, len(seq)):
            seq[idx] += seq[idx - 1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        arr = self.RawArray('i', seq) if raw else self.Array('i', seq)

        # Length, indexing and slicing mirror the plain list.
        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        # Slice assignment accepts an array.array too.
        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
        self.assertEqual(list(arr[:]), seq)

        # Apply the same in-place transform locally and in a child
        # process; both views must agree afterwards.
        self.f(seq)
        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # Looping several times raises the odds that freshly freed
        # non-zero memory gets reused for the next allocation.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        # lock=None means "create a fresh lock", same as the default.
        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        # An explicitly supplied lock must be the one get_lock() returns.
        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw array without the wrapper methods.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        # Any other object passed as lock is rejected.
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestContainers(BaseTestCase):
    """Exercise the manager's proxied containers (list, dict, Namespace)."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2, 3, 4])

        # In-place repetition and concatenation behave like a real list.
        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        # A proxied list built from proxies exposes each element's data.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        # Mutating through one proxy is visible through the other.
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _ in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        # Writing through a nested proxy affects only that element.
        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        # A self-referencing proxied list must not prevent cleanup.
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        # Writes via the outer proxy show through the inner one and back.
        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Dropping the local proxies must not invalidate the nested data.
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Clearing the outer dict leaves the inner proxies alive.
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        # Dict proxies nested inside a list proxy behave the same way.
        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_nested_queue(self):
        a = self.list()  # Test queue inside list
        a.append(self.Queue())
        a[0].put(123)
        self.assertEqual(a[0].get(), 123)
        b = self.dict()  # Test queue inside dict
        b[0] = self.Queue()
        b[0].put(456)
        self.assertEqual(b[0].get(), 456)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        # Deleted attributes disappear; _-prefixed ones are hidden in repr.
        del n.job
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
|
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
2023-10-30 14:18:36 -03:00
|
|
|
def sqr(x, wait=0.0, event=None):
    """Return x*x after pausing.

    Without an event the pause is an unconditional sleep of `wait`
    seconds; with one, the worker waits on the event (up to `wait`
    seconds) so the caller can release it early.
    """
    if event is not None:
        event.wait(wait)
    else:
        time.sleep(wait)
    return x * x
2010-11-09 16:55:52 -04:00
|
|
|
|
2011-12-21 06:03:24 -04:00
|
|
|
def mul(x, y):
    """Return the product of x and y (helper for starmap tests)."""
    return x * y
|
|
|
|
2016-02-10 18:58:18 -04:00
|
|
|
def raise_large_valuerror(wait):
    """Sleep `wait` seconds, then raise a ValueError with a 1 MiB message.

    The oversized payload exercises error transport between processes.
    """
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
|
|
|
|
2017-03-24 09:52:11 -03:00
|
|
|
def identity(x):
    """Return the argument unchanged."""
    return x
|
|
|
|
|
|
|
class CountedObject(object):
    """Object that tracks how many instances are currently alive."""

    # Number of live instances; bumped in __new__, dropped in __del__.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return super().__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
2015-03-13 03:25:26 -03:00
|
|
|
class SayWhenError(ValueError):
    """Marker error raised by exception_throwing_generator at a chosen point."""
|
|
|
def exception_throwing_generator(total, when):
    """Yield 0..total-1, raising SayWhenError at index `when`.

    A `when` of -1 raises before the first yield (i.e. when the
    generator object itself is first advanced).
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    for idx in range(total):
        if idx == when:
            raise SayWhenError("Somebody said when")
        yield idx
2017-03-24 09:52:11 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
class _TestPool(BaseTestCase):
|
|
|
|
|
2012-10-08 10:56:24 -03:00
|
|
|
@classmethod
def setUpClass(cls):
    """Create one shared 4-worker pool used by every test in the class."""
    super().setUpClass()
    cls.pool = cls.Pool(4)
|
|
|
@classmethod
def tearDownClass(cls):
    """Shut the shared pool down and drop the class-level reference."""
    cls.pool.terminate()
    cls.pool.join()
    cls.pool = None
    super().tearDownClass()
|
2008-06-11 13:44:04 -03:00
|
|
|
def test_apply(self):
    # apply() supports positional and keyword arguments alike.
    papply = self.pool.apply
    self.assertEqual(papply(sqr, (5,)), sqr(5))
    self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3))
|
|
|
def test_map(self):
    # Pool.map must agree with the built-in map, with or without an
    # explicit chunksize.
    pmap = self.pool.map
    self.assertEqual(pmap(sqr, list(range(10))),
                     list(map(sqr, list(range(10)))))
    self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                     list(map(sqr, list(range(100)))))
|
2011-12-21 06:03:24 -04:00
|
|
|
def test_starmap(self):
    # Pool.starmap must agree with itertools.starmap, with or without
    # an explicit chunksize.
    psmap = self.pool.starmap
    tuples = list(zip(range(10), range(9, -1, -1)))
    self.assertEqual(psmap(mul, tuples),
                     list(itertools.starmap(mul, tuples)))
    tuples = list(zip(range(100), range(99, -1, -1)))
    self.assertEqual(psmap(mul, tuples, chunksize=20),
                     list(itertools.starmap(mul, tuples)))
|
|
|
|
def test_starmap_async(self):
    # The async variant delivers the same result via .get().
    tuples = list(zip(range(100), range(99, -1, -1)))
    self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                     list(itertools.starmap(mul, tuples)))
|
2012-10-27 07:53:02 -03:00
|
|
|
def test_map_async(self):
    # map_async().get() must agree with the built-in map.
    self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                     list(map(sqr, list(range(10)))))
|
|
|
|
def test_map_async_callbacks(self):
    # On success only `callback` fires; on failure only `error_callback`
    # fires, receiving the exception instance.
    call_args = self.manager.list() if self.TYPE == 'manager' else []
    self.pool.map_async(int, ['1'],
                        callback=call_args.append,
                        error_callback=call_args.append).wait()
    self.assertEqual(1, len(call_args))
    self.assertEqual([1], call_args[0])
    self.pool.map_async(int, ['a'],
                        callback=call_args.append,
                        error_callback=call_args.append).wait()
    self.assertEqual(2, len(call_args))
    self.assertIsInstance(call_args[1], ValueError)
|
2013-10-28 20:11:58 -03:00
|
|
|
def test_map_unplicklable(self):
    # Issue #19425 -- failure to pickle should not cause a hang
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    class A(object):
        def __reduce__(self):
            # Deliberately unpicklable payload.
            raise RuntimeError('cannot pickle')

    with self.assertRaises(RuntimeError):
        self.pool.map(sqr, [A()] * 10)
|
Merged revisions 73995,74002,74005,74007-74008,74011,74019-74023 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r73995 | vinay.sajip | 2009-07-13 07:21:05 -0400 (Mon, 13 Jul 2009) | 1 line
Issue #6314: logging: Extra checks on the "level" argument in more places.
........
r74002 | marc-andre.lemburg | 2009-07-13 16:23:49 -0400 (Mon, 13 Jul 2009) | 6 lines
Use a new global DEV_NULL instead of hard-coding /dev/null into the system
command helper functions.
See #6479 for some motivation.
........
r74005 | marc-andre.lemburg | 2009-07-13 17:28:33 -0400 (Mon, 13 Jul 2009) | 6 lines
Use a different VER command output parser to address the localization
issues mentioned in #3410.
Prepare for Windows 7 (still commented out).
........
r74007 | michael.foord | 2009-07-14 13:58:12 -0400 (Tue, 14 Jul 2009) | 1 line
Move TestRunner initialisation into unittest.TestProgram.runTests. Fixes issue 6418.
........
r74008 | benjamin.peterson | 2009-07-14 20:46:42 -0400 (Tue, 14 Jul 2009) | 1 line
update year
........
r74011 | ezio.melotti | 2009-07-15 13:07:04 -0400 (Wed, 15 Jul 2009) | 1 line
methods' names pep8ification
........
r74019 | amaury.forgeotdarc | 2009-07-15 17:29:27 -0400 (Wed, 15 Jul 2009) | 2 lines
#6076 Add a title to the IDLE Preferences window.
........
r74020 | georg.brandl | 2009-07-16 03:18:07 -0400 (Thu, 16 Jul 2009) | 1 line
#5910: fix kqueue for calls with more than one event.
........
r74021 | georg.brandl | 2009-07-16 03:33:04 -0400 (Thu, 16 Jul 2009) | 1 line
#6486: start with built in functions rather than "built in objects".
........
r74022 | georg.brandl | 2009-07-16 03:38:35 -0400 (Thu, 16 Jul 2009) | 1 line
#6481: fix typo in os.system() replacement.
........
r74023 | jesse.noller | 2009-07-16 10:23:04 -0400 (Thu, 16 Jul 2009) | 1 line
Issue 6433: multiprocessing.pool.map hangs on empty list
........
2009-07-17 06:18:18 -03:00
|
|
|
def test_map_chunksize(self):
    # Issue 6433: map over an empty iterable with an explicit chunksize
    # must complete instead of hanging.
    try:
        self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
    except multiprocessing.TimeoutError:
        self.fail("pool.map_async with chunksize stalled on null list")
|
2017-03-29 00:58:54 -03:00
|
|
|
def test_map_handle_iterable_exception(self):
    """An exception from the input iterable must surface from map()."""
    if self.TYPE == 'manager':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    # SayWhenError seen at the very first of the iterable
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
    # again, make sure it's reentrant
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

    # ...and in the middle of the iterable.
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

    # An iterable whose __next__ always raises but reports a length.
    class SpecialIterable:
        def __iter__(self):
            return self
        def __next__(self):
            raise SayWhenError
        def __len__(self):
            return 1

    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, SpecialIterable(), 1)
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, SpecialIterable(), 1)
|
2008-06-11 13:44:04 -03:00
|
|
|
def test_async(self):
    # apply_async blocks in .get() for roughly the worker's sleep time
    # and then returns the result.
    res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
    get = TimingWrapper(res.get)
    self.assertEqual(get(), 49)
    self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
|
|
|
|
def test_async_timeout(self):
    """result.get(timeout=...) must raise TimeoutError after the timeout."""
    # Fix: create the event before the try block.  Previously it was
    # assigned inside the try, so an early failure (e.g. in apply_async)
    # before the assignment would make the finally clause die with a
    # NameError instead of reporting the real error.
    event = threading.Event() if self.TYPE == 'threads' else None
    p = self.Pool(3)
    try:
        res = p.apply_async(sqr, (6, TIMEOUT2 + support.SHORT_TIMEOUT, event))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
    finally:
        if event is not None:
            # Release the worker so it does not sleep out the full wait.
            event.set()
        p.terminate()
        p.join()
|
|
|
|
def test_imap(self):
    # Consuming the iterator all at once matches the built-in map.
    it = self.pool.imap(sqr, list(range(10)))
    self.assertEqual(list(it), list(map(sqr, list(range(10)))))

    # Step-by-step consumption yields results in order, then stops.
    it = self.pool.imap(sqr, list(range(10)))
    for i in range(10):
        self.assertEqual(next(it), i * i)
    self.assertRaises(StopIteration, it.__next__)

    # Chunked delivery preserves the ordering as well.
    it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
    for i in range(1000):
        self.assertEqual(next(it), i * i)
    self.assertRaises(StopIteration, it.__next__)
|
2015-03-13 03:25:26 -03:00
|
|
|
    def test_imap_handle_iterable_exception(self):
        """Exceptions raised by the *input* iterable propagate out of imap()."""
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)

        # Error in the middle: earlier results still arrive first.
        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
def test_imap_unordered(self):
|
2018-07-03 08:20:35 -03:00
|
|
|
it = self.pool.imap_unordered(sqr, list(range(10)))
|
|
|
|
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2018-07-03 08:20:35 -03:00
|
|
|
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
|
2008-06-11 13:44:04 -03:00
|
|
|
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
|
|
|
|
|
2015-03-13 03:25:26 -03:00
|
|
|
    def test_imap_unordered_handle_iterable_exception(self):
        """Input-iterable exceptions propagate out of imap_unordered() too."""
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)

        # Error after three good items; we cannot predict which results
        # arrive before the error, only that each is a valid square.
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

        # Same, with the error landing inside a chunk of size 2.
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)
|
2015-03-13 03:25:26 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
def test_make_pool(self):
|
2016-03-15 06:48:28 -03:00
|
|
|
expected_error = (RemoteError if self.TYPE == 'manager'
|
|
|
|
else ValueError)
|
2011-06-20 12:53:35 -03:00
|
|
|
|
2016-03-15 06:48:28 -03:00
|
|
|
self.assertRaises(expected_error, self.Pool, -1)
|
|
|
|
self.assertRaises(expected_error, self.Pool, 0)
|
|
|
|
|
|
|
|
if self.TYPE != 'manager':
|
|
|
|
p = self.Pool(3)
|
|
|
|
try:
|
|
|
|
self.assertEqual(3, len(p._pool))
|
|
|
|
finally:
|
|
|
|
p.close()
|
|
|
|
p.join()
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
    def test_terminate(self):
        """terminate() stops the pool promptly despite a huge task backlog."""
        # Simulate slow tasks which take "forever" to complete
        sleep_time = support.LONG_TIMEOUT

        if self.TYPE == 'threads':
            # Thread pool workers can't be forced to quit, so if the first
            # task starts early enough, we will end up waiting for it.
            # Sleep for a shorter time, so the test doesn't block.
            sleep_time = 1

        p = self.Pool(3)
        # 10_000 queued tasks ensure terminate() must discard pending work.
        args = [sleep_time for i in range(10_000)]
        result = p.map_async(time.sleep, args, chunksize=1)
        time.sleep(0.2)  # give some tasks a chance to start
        p.terminate()
        p.join()
|
2010-01-26 23:36:01 -04:00
|
|
|
|
2012-06-06 15:04:57 -03:00
|
|
|
def test_empty_iterable(self):
|
|
|
|
# See Issue 12157
|
|
|
|
p = self.Pool(1)
|
|
|
|
|
|
|
|
self.assertEqual(p.map(sqr, []), [])
|
|
|
|
self.assertEqual(list(p.imap(sqr, [])), [])
|
|
|
|
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
|
|
|
|
self.assertEqual(p.map_async(sqr, []).get(), [])
|
|
|
|
|
|
|
|
p.close()
|
|
|
|
p.join()
|
|
|
|
|
2012-06-18 13:47:52 -03:00
|
|
|
    def test_context(self):
        """A Pool used as a context manager terminates itself on exit."""
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            # After __exit__ the pool is terminated, so new work is refused.
            self.assertRaises(ValueError, p.map_async, sqr, L)
|
2012-06-18 13:47:52 -03:00
|
|
|
|
2013-05-06 07:38:25 -03:00
|
|
|
    @classmethod
    def _test_traceback(cls):
        # Runs inside a worker.  test_traceback asserts that the line below
        # (including its trailing comment) appears verbatim in the remote
        # traceback text -- do not edit that line.
        raise RuntimeError(123) # some comment
|
|
|
|
|
|
|
|
    def test_traceback(self):
        """Remote tracebacks are chained onto the raised exception via __cause__."""
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
            p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            # The cause carries the worker-side traceback text.
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

            # The remote traceback must also show up in excepthook output.
            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
            p.join()
|
2013-05-06 07:38:25 -03:00
|
|
|
|
2014-03-23 09:30:54 -03:00
|
|
|
    @classmethod
    def _test_wrapped_exception(cls):
        # Runs inside a worker; test_wrapped_exception checks the pool
        # delivers this RuntimeError to the caller unwrapped.
        raise RuntimeError('foo')
|
|
|
|
|
|
|
|
def test_wrapped_exception(self):
|
|
|
|
# Issue #20980: Should not wrap exception when using thread pool
|
|
|
|
with self.Pool(1) as p:
|
|
|
|
with self.assertRaises(RuntimeError):
|
|
|
|
p.apply(self._test_wrapped_exception)
|
2018-11-27 20:14:31 -04:00
|
|
|
p.join()
|
2014-03-23 09:30:54 -03:00
|
|
|
|
2016-02-10 18:58:18 -04:00
|
|
|
    def test_map_no_failfast(self):
        """A failing map() must still let outstanding jobs drain before join()."""
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).

        t_start = time.monotonic()

        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    # Give both workers time to raise before shutting down.
                    time.sleep(0.5)
                    p.close()
                    p.join()

        # check that we indeed waited for all jobs
        self.assertGreater(time.monotonic() - t_start, 0.9)
|
2016-02-10 18:58:18 -04:00
|
|
|
|
2024-05-01 18:58:22 -03:00
|
|
|
    @support.requires_gil_enabled("gh-118413: test is flaky with GIL disabled")
    def test_release_task_refs(self):
        """The pool must not keep task arguments or results alive."""
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)

        del objs
        gc.collect()  # For PyPy or other GCs.
        time.sleep(DELTA)  # let threaded cleanup code run
        # Every weakref must now be dead.
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)
|
|
|
|
|
2018-12-12 21:15:30 -04:00
|
|
|
def test_enter(self):
|
|
|
|
if self.TYPE == 'manager':
|
|
|
|
self.skipTest("test not applicable to manager")
|
|
|
|
|
|
|
|
pool = self.Pool(1)
|
|
|
|
with pool:
|
|
|
|
pass
|
|
|
|
# call pool.terminate()
|
|
|
|
# pool is no longer running
|
|
|
|
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
# bpo-35477: pool.__enter__() fails if the pool is not running
|
|
|
|
with pool:
|
|
|
|
pass
|
|
|
|
pool.join()
|
|
|
|
|
2018-12-20 15:33:51 -04:00
|
|
|
    def test_resource_warning(self):
        """Garbage-collecting a pool left in the RUN state emits ResourceWarning."""
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")

        pool = self.Pool(1)
        pool.terminate()
        pool.join()

        # force state to RUN to emit ResourceWarning in __del__()
        pool._state = multiprocessing.pool.RUN

        with warnings_helper.check_warnings(
                ('unclosed running multiprocessing pool', ResourceWarning)):
            pool = None
            support.gc_collect()
|
|
|
|
|
2010-11-09 16:55:52 -04:00
|
|
|
def raising():
    """Helper task that always fails with KeyError('key')."""
    raise KeyError("key")
|
|
|
|
|
|
|
|
def unpickleable_result():
    """Return a lambda; closures cannot be pickled by the result handler."""
    return lambda: 42
|
|
|
|
|
|
|
|
class _TestPoolWorkerErrors(BaseTestCase):
    """Worker-raised errors are reported to callers without losing workers."""

    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        """error_callback receives the exception raised by the task."""
        p = multiprocessing.Pool(2)

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        # The callback must have fired with the original KeyError.
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def test_unpickleable_result(self):
        """Unpicklable results surface as MaybeEncodingError, not dead workers."""
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):

            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            # The wrapper records both the pickling error and the value.
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()
|
|
|
|
|
|
|
|
class _TestPoolWorkerLifetime(BaseTestCase):
    """Behaviour of pools whose workers serve a bounded number of tasks."""

    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        """Workers are replaced after serving maxtasksperchild tasks."""
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        """close() before tasks finish must not hang join()."""
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))

    def test_pool_maxtasksperchild_invalid(self):
        """Non-positive or non-integer maxtasksperchild raises ValueError."""
        for value in [0, -1, 0.5, "12"]:
            with self.assertRaises(ValueError):
                multiprocessing.Pool(3, maxtasksperchild=value)

    def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        """A pool still alive at interpreter exit shuts down cleanly."""
        # tests cases against bpo-38744 and bpo-39360
        cmd = '''if 1:
            from multiprocessing import Pool
            problem = None
            class A:
                def __init__(self):
                    self.pool = Pool(processes=1)
            def test():
                global problem
                problem = A()
                problem.pool.map(float, tuple(range(10)))
            if __name__ == "__main__":
                test()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
        self.assertEqual(rc, 0)
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Test of creating a customized manager class
|
|
|
|
#
|
|
|
|
|
|
|
|
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
|
|
|
|
|
|
|
|
class FooBar(object):
    """Fixture exposing a public method, a failing one, and a "private" one."""

    def f(self):
        """Return a marker string identifying this method."""
        return 'f()'

    def g(self):
        """Always fail, so remote error propagation can be tested."""
        raise ValueError

    def _h(self):
        """Underscore-prefixed: hidden from proxies unless explicitly exposed."""
        return '_h()'
|
|
|
|
|
|
|
|
def baz():
    """Yield the first ten square numbers: 0, 1, 4, ..., 81."""
    for n in range(10):
        yield n * n
|
|
|
|
|
|
|
|
class IteratorProxy(BaseProxy):
    """Proxy that forwards iteration to the referent held by the manager."""
    # Only __next__ is exposed for remote invocation.
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        # Ask the manager process to advance the remote iterator;
        # StopIteration is propagated back through the proxy machinery.
        return self._callmethod('__next__')
|
|
|
|
|
|
|
|
class MyManager(BaseManager):
    """Custom manager onto which the FooBar/baz test types are registered."""
    pass
|
|
|
|
|
|
|
|
# 'Foo' exposes all public methods of FooBar; 'Bar' explicitly exposes
# f and the underscore method _h; 'baz' returns an iterator that is
# accessed through IteratorProxy.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
|
|
|
|
|
|
|
|
|
|
|
|
class _TestMyManager(BaseTestCase):
    """Exercise the customized MyManager registrations."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        """Explicit start()/shutdown() lifecycle."""
        manager = MyManager(shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager.start()
        self.common(manager)
        manager.shutdown()

        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        """The context manager starts and stops the server itself."""
        manager = MyManager(shutdown_timeout=SHUTDOWN_TIMEOUT)
        with manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        """Entering an already-started manager must not restart it."""
        manager = MyManager(shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        """Shared assertions about the proxies MyManager produces."""
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        # 'Foo' exposes only the public methods; 'Bar' exposed f and _h.
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        # _h was not exposed on 'Foo', so calling it remotely fails.
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        # The remote generator iterates through IteratorProxy.
        self.assertEqual(list(baz), [i*i for i in range(10)])
|
|
|
|
|
2012-06-14 11:30:10 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Test of connecting to a remote server and using xmlrpclib for serialization
|
|
|
|
#
|
|
|
|
|
|
|
|
# Module-level queue living in the server process; get_queue() is the
# registered accessor so every remote caller receives the same instance.
_queue = pyqueue.Queue()
def get_queue():
    return _queue
|
|
|
|
|
|
|
|
class QueueManager(BaseManager):
    '''manager class used by server process'''
# The server side supplies the queue via the registered callable.
QueueManager.register('get_queue', callable=get_queue)
|
|
|
|
|
|
|
|
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
# No callable: clients using this class connect to an existing server.
QueueManager2.register('get_queue')
|
|
|
|
|
|
|
|
|
|
|
|
SERIALIZER = 'xmlrpclib'  # use xmlrpclib rather than pickle for manager messages
|
|
|
|
|
|
|
|
class _TestRemoteManager(BaseTestCase):
    """Connect to a manager in another process, serializing via xmlrpclib."""

    ALLOWED_TYPES = ('manager',)
    # Assorted serializable values, including non-ASCII text and bytes.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
              ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect to the server and push the test values.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER,
            shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        """Values round-trip through the remote queue; unserializable ones fail."""
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER,
            shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager.start()
        self.addCleanup(manager.shutdown)

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER,
            shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager2.connect()
        queue = manager2.get_queue()

        self.assertEqual(queue.get(), self.result)

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
|
|
|
|
|
2020-06-04 09:48:17 -03:00
|
|
|
|
2023-05-20 20:33:09 -03:00
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
class _TestManagerRestart(BaseTestCase):
    """A manager can be restarted quickly on the address it just released."""

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect and enqueue a single sentinel string.
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER,
            shutdown_timeout=SHUTDOWN_TIMEOUT)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        """Shut a manager down, then rebind a new one to the same address."""
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey,
            serializer=SERIALIZER, shutdown_timeout=SHUTDOWN_TIMEOUT)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()

            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()

        # Immediately rebind to the same address.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER,
            shutdown_timeout=SHUTDOWN_TIMEOUT)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER,
                shutdown_timeout=SHUTDOWN_TIMEOUT)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
|
2009-03-30 13:37:36 -03:00
|
|
|
|
2023-08-11 14:44:18 -03:00
|
|
|
|
|
|
|
class FakeConnection:
    """Minimal stand-in for a Connection whose recv() reports an error reply."""

    def send(self, payload):
        # Outgoing data is simply discarded.
        pass

    def recv(self):
        # Mimic the '#ERROR' reply used by the manager dispatch protocol.
        return '#ERROR', pyqueue.Empty()
|
|
|
|
|
|
|
|
class TestManagerExceptions(unittest.TestCase):
    # Issue 106558: Manager exceptions avoids creating cyclic references.
    def setUp(self):
        self.mgr = multiprocessing.Manager()

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_queue_get(self):
        """An Empty raised through a proxy must die by refcount alone."""
        queue = self.mgr.Queue()
        # Disable the cyclic GC so only reference counting can free the
        # exception -- a cycle would keep the weakref alive.
        if gc.isenabled():
            gc.disable()
            self.addCleanup(gc.enable)
        try:
            queue.get_nowait()
        except pyqueue.Empty as e:
            wr = weakref.ref(e)
        # The exception must be gone as soon as the handler exits.
        self.assertEqual(wr(), None)

    def test_dispatch(self):
        """dispatch() error replies must not create reference cycles either."""
        if gc.isenabled():
            gc.disable()
            self.addCleanup(gc.enable)
        try:
            multiprocessing.managers.dispatch(FakeConnection(), None, None)
        except pyqueue.Empty as e:
            wr = weakref.ref(e)
        self.assertEqual(wr(), None)
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
# Test of connection objects
#
|
|
|
|
|
|
|
|
SENTINEL = latin('')  # empty bytes message telling the _echo() child to stop
|
|
|
|
|
|
|
|
class _TestConnection(BaseTestCase):
|
|
|
|
|
|
|
|
ALLOWED_TYPES = ('processes', 'threads')
|
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
@classmethod
|
|
|
|
def _echo(cls, conn):
|
2008-06-11 13:44:04 -03:00
|
|
|
for msg in iter(conn.recv_bytes, SENTINEL):
|
|
|
|
conn.send_bytes(msg)
|
|
|
|
conn.close()
|
|
|
|
|
|
|
|
    def test_connection(self):
        """End-to-end checks of send/recv, *_bytes, recv_bytes_into and poll."""
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()

        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        # Picklable objects round-trip through the echo child.
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)

        if self.TYPE == 'processes':
            # recv_bytes_into with a large-enough buffer and no offset...
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # ...then with a three-item offset into the buffer...
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # ...and with a too-small buffer, which raises BufferTooShort
            # carrying the whole message in args.
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        # Nothing pending: poll() returns False immediately...
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        # ...even with a negative timeout...
        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        # ...and waits the full timeout when one is given.
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)
        time.sleep(.1)

        # With data pending, poll() succeeds without waiting.
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)   # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            # The peer closed, so further reads hit EOF.
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()
|
|
|
|
|
|
|
|
def test_duplex_false(self):
|
|
|
|
reader, writer = self.Pipe(duplex=False)
|
|
|
|
self.assertEqual(writer.send(1), None)
|
|
|
|
self.assertEqual(reader.recv(), 1)
|
|
|
|
if self.TYPE == 'processes':
|
|
|
|
self.assertEqual(reader.readable, True)
|
|
|
|
self.assertEqual(reader.writable, False)
|
|
|
|
self.assertEqual(writer.readable, False)
|
|
|
|
self.assertEqual(writer.writable, True)
|
2012-12-25 10:47:37 -04:00
|
|
|
self.assertRaises(OSError, reader.send, 2)
|
|
|
|
self.assertRaises(OSError, writer.recv)
|
|
|
|
self.assertRaises(OSError, writer.poll)
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
    def test_spawn_close(self):
        """Closing child_conn in the parent right after spawn must be safe."""
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()    # this might complete before child initializes

        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()
|
|
|
|
|
|
|
|
    def test_sendbytes(self):
        """send_bytes() honours offset/size and rejects out-of-range values."""
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()

        # Whole message.
        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)

        # Offset only: skip the first five bytes.
        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])

        # Offset and size: an eight-byte slice starting at 7.
        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])

        # Offset == len(msg) yields an empty message.
        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))

        # Explicit zero size at the end also yields an empty message.
        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))

        # Offset past the end of the message.
        self.assertRaises(ValueError, a.send_bytes, msg, 27)

        # Size extending past the end of the message.
        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)

        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)

        # Negative offset.
        self.assertRaises(ValueError, a.send_bytes, msg, -1)

        # Negative size.
        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
|
|
|
|
|
2011-08-23 14:46:22 -03:00
|
|
|
@classmethod
|
|
|
|
def _is_fd_assigned(cls, fd):
|
|
|
|
try:
|
|
|
|
os.fstat(fd)
|
|
|
|
except OSError as e:
|
|
|
|
if e.errno == errno.EBADF:
|
|
|
|
return False
|
|
|
|
raise
|
|
|
|
else:
|
|
|
|
return True
|
|
|
|
|
|
|
|
    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Runs in the child: receive a file descriptor over *conn* and
        # write *data* through it.
        if create_dummy_fds:
            # Occupy every free fd below 256 so the received descriptor
            # ends up with a large number (exercises issue #11657).
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            # On Windows the received handle must be converted to a
            # C-runtime file descriptor before os.write() can use it.
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)
|
|
|
|
|
2011-09-20 15:36:51 -03:00
|
|
|
    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        """A file descriptor can be handed to a child via reduction."""
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                # send_handle() expects an OS handle on Windows.
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
            p.join()
        # The child wrote through the transferred descriptor.
        with open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")
|
|
|
|
|
2011-09-20 15:36:51 -03:00
|
|
|
    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657): descriptor passing must also work
        # when the received fd number does not fit in a single byte.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        # create_dummy_fds=True makes the child fill all fds < 256 first.
        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "wb") as f:
            fd = f.fileno()
            # Find a free descriptor number above 255 to send.
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
            # Keep the file open until the child has finished writing.
            p.join()
        with open(os_helper.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")
|
2011-09-20 22:53:25 -03:00
|
|
|
@classmethod
|
|
|
|
def _send_data_without_fd(self, conn):
|
|
|
|
os.write(conn.fileno(), b"\0")
|
|
|
|
|
2011-09-21 13:48:21 -03:00
|
|
|
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
|
2011-09-20 22:53:25 -03:00
|
|
|
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
|
|
|
|
def test_missing_fd_transfer(self):
|
|
|
|
# Check that exception is raised when received data is not
|
|
|
|
# accompanied by a file descriptor in ancillary data.
|
|
|
|
if self.TYPE != 'processes':
|
|
|
|
self.skipTest("only makes sense with processes")
|
|
|
|
conn, child_conn = self.Pipe(duplex=True)
|
|
|
|
|
|
|
|
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
|
|
|
|
p.daemon = True
|
|
|
|
p.start()
|
|
|
|
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
|
|
|
|
p.join()
|
2011-08-23 14:46:22 -03:00
|
|
|
|
2012-06-18 13:47:52 -03:00
|
|
|
def test_context(self):
|
|
|
|
a, b = self.Pipe()
|
|
|
|
|
|
|
|
with a, b:
|
|
|
|
a.send(1729)
|
|
|
|
self.assertEqual(b.recv(), 1729)
|
|
|
|
if self.TYPE == 'processes':
|
|
|
|
self.assertFalse(a.closed)
|
|
|
|
self.assertFalse(b.closed)
|
|
|
|
|
|
|
|
if self.TYPE == 'processes':
|
|
|
|
self.assertTrue(a.closed)
|
|
|
|
self.assertTrue(b.closed)
|
2012-12-25 10:47:37 -04:00
|
|
|
self.assertRaises(OSError, a.recv)
|
|
|
|
self.assertRaises(OSError, b.recv)
|
2012-06-18 13:47:52 -03:00
|
|
|
|
2012-02-08 16:15:58 -04:00
|
|
|
class _TestListener(BaseTestCase):
    """Tests for multiprocessing.connection.Listener itself."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second Listener to an already-used address must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        # Listener, Client and accepted connections all work as context
        # managers; the listener is unusable once its block is left.
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)

    def test_empty_authkey(self):
        # bpo-43952: allow empty bytes as authkey
        def handler(*args):
            raise RuntimeError('Connection took too long...')

        def run(addr, authkey):
            # Client side of the handshake, run in a helper thread.
            client = self.connection.Client(addr, authkey=authkey)
            client.send(1729)

        key = b''

        with self.connection.Listener(authkey=key) as listener:
            thread = threading.Thread(target=run, args=(listener.address, key))
            thread.start()
            try:
                with listener.accept() as d:
                    self.assertEqual(d.recv(), 1729)
            finally:
                # Always reap the client thread, even if accept() fails.
                thread.join()

        if self.TYPE == 'processes':
            # The listener must be closed after leaving the with-block.
            with self.assertRaises(OSError):
                listener.accept()

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        # Linux abstract-namespace sockets (address starting with '\0').
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)
2008-06-11 13:44:04 -03:00
|
|
|
class _TestListenerClient(BaseTestCase):
    """End-to-end Listener/Client round trips across a child process."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child side: connect to *address*, send one message, disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        # One round trip per supported address family.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should have connected, written
        # data and closed the pipe handle by now.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() must see data that is already buffered on the connection.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
2013-07-16 11:33:41 -03:00
|
|
|
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll() semantics."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # An empty message still makes poll() report readability,
        # and polling does not consume the message.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child side: send each message with a small delay between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # Poll in short slices (up to ~2s total) until data arrives.
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # Whichever message the parent reads, it must be complete.
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Repeated poll() calls must not merge distinct messages.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Test of sending connection and socket objects between processes
|
|
|
|
#
|
2012-04-24 17:56:57 -03:00
|
|
|
|
|
|
|
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('sha256')
class _TestPicklingConnections(BaseTestCase):
    """Sending Connection and socket objects between processes."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        # Shut down the background resource sharer so it does not leak
        # into subsequent tests.
        from multiprocessing import resource_sharer
        resource_sharer.stop(timeout=support.LONG_TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        """Child: create a listener per family, ship each accepted
        connection back to the parent over *conn* (pickled)."""
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Same dance once more with a plain socket instead of a Listener.
        l = socket.create_server((socket_helper.HOST, 0))
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        # Block until the parent signals completion.
        conn.recv()

    @classmethod
    def _remote(cls, conn):
        """Child: connect to each (address, msg) received over *conn*
        and echo the message back upper-cased; None ends the loop."""
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        # Final round uses a raw socket rather than a Connection.
        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        # Listener child: accepts connections and pickles them back to us.
        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        # Remote child: dials the addresses we forward to it.
        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            # The accepted connection arrives here pickled from the child.
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        # Raw-socket round: drain the echoed data in 100-byte chunks.
        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        # Release the listener child, then tear everything down.
        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Receives a write end, greets through it, then doubles whatever
        # arrives on a subsequently received read end.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        # Transfer a write-only end; child must be able to write to it.
        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        # Transfer a read-only end; child must be able to read from it.
        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestHeap(BaseTestCase):
    """Stress-tests for multiprocessing.heap's private arena allocator."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        super().setUp()
        # Make pristine heap for these tests
        self.old_heap = multiprocessing.heap.BufferWrapper._heap
        multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()

    def tearDown(self):
        # Restore the process-wide heap so other tests are unaffected.
        multiprocessing.heap.BufferWrapper._heap = self.old_heap
        super().tearDown()

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap
        # Force arenas to be freed as soon as they become empty.
        heap._DISCARD_FREE_SPACE_LARGER_THAN = 0

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
            # Drop the loop-local reference so the block can be freed.
            del b

        # verify the state of the heap: free + occupied blocks must tile
        # every arena exactly, with no gaps and no overlaps.
        with heap._lock:
            all = []
            free = 0
            occupied = 0
            for L in list(heap._len_to_seq.values()):
                # count all free blocks in arenas
                for arena, start, stop in L:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'free'))
                    free += (stop-start)
            for arena, arena_blocks in heap._allocated_blocks.items():
                # count all allocated blocks in arenas
                for start, stop in arena_blocks:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'occupied'))
                    occupied += (stop-start)

            self.assertEqual(free + occupied,
                             sum(arena.size for arena in heap._arenas))

            all.sort()

            # Adjacent entries must abut within an arena, and arena
            # boundaries must fall exactly at block edges.
            for i in range(len(all)-1):
                (arena, start, stop) = all[i][:3]
                (narena, nstart, nstop) = all[i+1][:3]
                if arena != narena:
                    # Two different arenas
                    self.assertEqual(stop, heap._arenas[arena].size) # last block
                    self.assertEqual(nstart, 0) # first block
                else:
                    # Same arena: two adjacent blocks
                    self.assertEqual(stop, nstart)

        # test free'ing all blocks
        random.shuffle(blocks)
        while blocks:
            blocks.pop()

        # After freeing everything the heap must be completely empty.
        self.assertEqual(heap._n_frees, heap._n_mallocs)
        self.assertEqual(len(heap._pending_free_blocks), 0)
        self.assertEqual(len(heap._arenas), 0)
        self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
        self.assertEqual(len(heap._len_to_seq), 0)

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
|
|
|
class _Foo(Structure):
    # ctypes structure used by the sharedctypes tests below; the field
    # order and types must stay in sync with the positional arguments
    # passed to Value(_Foo, ...) and _Foo(...) in _TestSharedCTypes.
    _fields_ = [
        ('x', c_int),
        ('y', c_double),
        ('z', c_longlong,)
    ]
|
|
|
|
|
|
|
|
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocessing.sharedctypes Value/Array objects."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child side: double every shared value in place; the parent
        # checks the doubled values after join().
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        # With lock=False the raw (unsynchronized) wrappers are used;
        # test_synchronize() re-runs this with lock=True.
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        # Every shared object must have been doubled by the child.
        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same scenario, but through the lock-protected wrappers.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        # copy() must produce an independent snapshot of the structure.
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
|
2019-02-24 00:08:16 -04:00
|
|
|
|
|
|
|
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
|
2023-05-20 20:33:09 -03:00
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
|
2019-02-24 00:08:16 -04:00
|
|
|
class _TestSharedMemory(BaseTestCase):
|
|
|
|
|
|
|
|
ALLOWED_TYPES = ('processes',)
|
|
|
|
|
|
|
|
    @staticmethod
    def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
        """Child-process helper: attach to an existing shared memory block
        (by name, or use the given SharedMemory object directly), copy
        *binary_data* into its start, and detach."""
        if isinstance(shmem_name_or_obj, str):
            local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
        else:
            local_sms = shmem_name_or_obj
        local_sms.buf[:len(binary_data)] = binary_data
        local_sms.close()
|
2021-10-01 04:56:32 -03:00
|
|
|
def _new_shm_name(self, prefix):
|
|
|
|
# Add a PID to the name of a POSIX shared memory object to allow
|
|
|
|
# running multiprocessing tests (test_multiprocessing_fork,
|
|
|
|
# test_multiprocessing_spawn, etc) in parallel.
|
|
|
|
return prefix + str(os.getpid())
|
|
|
|
|
2024-02-25 05:31:03 -04:00
|
|
|
    def test_shared_memory_name_with_embedded_null(self):
        """A NUL byte embedded in a shared memory name must be rejected
        both when attaching and when unlinking."""
        name_tsmb = self._new_shm_name('test01_null')
        sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512)
        self.addCleanup(sms.unlink)
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(name_tsmb + '\0a', create=False, size=512)
        if shared_memory._USE_POSIX:
            # POSIX-only: unlink() goes through the OS name, so check the
            # same rejection there by temporarily corrupting the name.
            orig_name = sms._name
            try:
                sms._name = orig_name + '\0a'
                with self.assertRaises(ValueError):
                    sms.unlink()
            finally:
                sms._name = orig_name
|
2019-02-24 00:08:16 -04:00
|
|
|
    def test_shared_memory_basics(self):
        """Exercise create/attach/resize/unlink semantics of SharedMemory."""
        name_tsmb = self._new_shm_name('test01_tsmb')
        sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify attributes are readable.
        self.assertEqual(sms.name, name_tsmb)
        self.assertGreaterEqual(sms.size, 512)
        self.assertGreaterEqual(len(sms.buf), sms.size)

        # Verify __repr__
        self.assertIn(sms.name, str(sms))
        self.assertIn(str(sms.size), str(sms))

        # Modify contents of shared memory segment through memoryview.
        sms.buf[0] = 42
        self.assertEqual(sms.buf[0], 42)

        # Attach to existing shared memory segment.
        also_sms = shared_memory.SharedMemory(name_tsmb)
        self.assertEqual(also_sms.buf[0], 42)
        also_sms.close()

        # Attach to existing shared memory segment but specify a new size.
        same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size)
        self.assertLess(same_sms.size, 20*sms.size)  # Size was ignored.
        same_sms.close()

        # Creating Shared Memory Segment with -ve size
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=True, size=-2)

        # Attaching Shared Memory Segment without a name
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=False)

        # Test if shared memory segment is created properly,
        # when _make_filename returns an existing shared memory segment name
        with unittest.mock.patch(
            'multiprocessing.shared_memory._make_filename') as mock_make_filename:

            NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
            names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')]
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require name to start with /
            names = [NAME_PREFIX + name for name in names]

            # First candidate name collides with an existing segment, so the
            # implementation must retry with the second candidate.
            mock_make_filename.side_effect = names
            shm1 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm1.unlink)
            self.assertEqual(shm1._name, names[0])

            mock_make_filename.side_effect = names
            shm2 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm2.unlink)
            self.assertEqual(shm2._name, names[1])

        if shared_memory._USE_POSIX:
            # Posix Shared Memory can only be unlinked once.  Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of shared memory segment does not necessarily
            # happen until process exits, depending on the OS platform.
            name_dblunlink = self._new_shm_name('test01_dblunlink')
            sms_uno = shared_memory.SharedMemory(
                name_dblunlink,
                create=True,
                size=5000
            )
            with self.assertRaises(FileNotFoundError):
                try:
                    self.assertGreaterEqual(sms_uno.size, 5000)

                    sms_duo = shared_memory.SharedMemory(name_dblunlink)
                    sms_duo.unlink()  # First shm_unlink() call.
                    sms_duo.close()
                    sms_uno.close()

                finally:
                    sms_uno.unlink()  # A second shm_unlink() call is bad.

        with self.assertRaises(FileExistsError):
            # Attempting to create a new shared memory segment with a
            # name that is already in use triggers an exception.
            there_can_only_be_one_sms = shared_memory.SharedMemory(
                name_tsmb,
                create=True,
                size=512
            )

        if shared_memory._USE_POSIX:
            # Requesting creation of a shared memory segment with the option
            # to attach to an existing segment, if that name is currently in
            # use, should not trigger an exception.
            # Note:  Using a smaller size could possibly cause truncation of
            # the existing segment but is OS platform dependent.  In the
            # case of MacOS/darwin, requesting a smaller size is disallowed.
            class OptionalAttachSharedMemory(shared_memory.SharedMemory):
                _flags = os.O_CREAT | os.O_RDWR
            ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb)
            self.assertEqual(ok_if_exists_sms.size, sms.size)
            ok_if_exists_sms.close()

        # Attempting to attach to an existing shared memory segment when
        # no segment exists with the supplied name triggers an exception.
        with self.assertRaises(FileNotFoundError):
            nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
            nonexisting_sms.unlink()  # Error should occur on prior line.

        sms.close()
|
2021-10-01 07:45:59 -03:00
|
|
|
    def test_shared_memory_recreate(self):
        """Creation must retry when _make_filename yields a taken name."""
        # Test if shared memory segment is created properly,
        # when _make_filename returns an existing shared memory segment name
        with unittest.mock.patch(
            'multiprocessing.shared_memory._make_filename') as mock_make_filename:

            NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
            names = [self._new_shm_name('test03_fn'), self._new_shm_name('test04_fn')]
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require name to start with /
            names = [NAME_PREFIX + name for name in names]

            mock_make_filename.side_effect = names
            shm1 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm1.unlink)
            self.assertEqual(shm1._name, names[0])

            # The first candidate name is now in use, so the second
            # create call must fall through to names[1].
            mock_make_filename.side_effect = names
            shm2 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm2.unlink)
            self.assertEqual(shm2._name, names[1])
|
2024-02-25 05:31:03 -04:00
|
|
|
def test_invalid_shared_memory_creation(self):
|
2020-08-30 16:03:11 -03:00
|
|
|
# Test creating a shared memory segment with negative size
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
|
|
|
|
|
|
|
|
# Test creating a shared memory segment with size 0
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
|
|
|
|
|
|
|
|
# Test creating a shared memory segment without size argument
|
|
|
|
with self.assertRaises(ValueError):
|
|
|
|
sms_invalid = shared_memory.SharedMemory(create=True)
|
|
|
|
|
2021-10-01 07:45:59 -03:00
|
|
|
    def test_shared_memory_pickle_unpickle(self):
        """Pickling round-trips attach to the same underlying segment."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                sms = shared_memory.SharedMemory(create=True, size=512)
                self.addCleanup(sms.unlink)
                sms.buf[0:6] = b'pickle'

                # Test pickling
                pickled_sms = pickle.dumps(sms, protocol=proto)

                # Test unpickling
                sms2 = pickle.loads(pickled_sms)
                self.assertIsInstance(sms2, shared_memory.SharedMemory)
                self.assertEqual(sms.name, sms2.name)
                self.assertEqual(bytes(sms.buf[0:6]), b'pickle')
                self.assertEqual(bytes(sms2.buf[0:6]), b'pickle')

                # Test that unpickled version is still the same SharedMemory
                # (writes through either handle are visible in both).
                sms.buf[0:6] = b'newval'
                self.assertEqual(bytes(sms.buf[0:6]), b'newval')
                self.assertEqual(bytes(sms2.buf[0:6]), b'newval')

                sms2.buf[0:6] = b'oldval'
                self.assertEqual(bytes(sms.buf[0:6]), b'oldval')
                self.assertEqual(bytes(sms2.buf[0:6]), b'oldval')
|
|
|
    def test_shared_memory_pickle_unpickle_dead_object(self):
        """Unpickling after the segment was unlinked raises FileNotFoundError."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                sms = shared_memory.SharedMemory(create=True, size=512)
                sms.buf[0:6] = b'pickle'
                pickled_sms = pickle.dumps(sms, protocol=proto)

                # Now, we are going to kill the original object.
                # So, unpickled one won't be able to attach to it.
                sms.close()
                sms.unlink()

                with self.assertRaises(FileNotFoundError):
                    pickle.loads(pickled_sms)
|
2019-02-24 00:08:16 -04:00
|
|
|
    def test_shared_memory_across_processes(self):
        """A child process can attach by name or via a pickled instance."""
        # bpo-40135: don't define shared memory block's name in case of
        # the failure when we run multiprocessing tests in parallel.
        sms = shared_memory.SharedMemory(create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify remote attachment to existing block by name is working.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms.name, b'howdy')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'howdy')

        # Verify pickling of SharedMemory instance also works.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms, b'HELLO')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'HELLO')

        sms.close()
|
2019-05-10 15:42:35 -03:00
|
|
|
    @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
    def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
        """SIGINT sent to the manager's server must not kill it."""
        # bpo-36368: protect SharedMemoryManager server process from
        # KeyboardInterrupt signals.
        smm = multiprocessing.managers.SharedMemoryManager()
        smm.start()

        # make sure the manager works properly at the beginning
        sl = smm.ShareableList(range(10))

        # the manager's server should ignore KeyboardInterrupt signals, and
        # maintain its connection with the current process, and success when
        # asked to deliver memory segments.
        os.kill(smm._process.pid, signal.SIGINT)

        sl2 = smm.ShareableList(range(10))

        # test that the custom signal handler registered in the Manager does
        # not affect signal handling in the parent process.
        with self.assertRaises(KeyboardInterrupt):
            os.kill(os.getpid(), signal.SIGINT)

        smm.shutdown()
|
2019-05-13 16:15:32 -03:00
|
|
|
    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
        """Manager must share its parent's resource_tracker (bpo-36867)."""
        # bpo-36867: test that a SharedMemoryManager uses the
        # same resource_tracker process as its parent.
        cmd = '''if 1:
            from multiprocessing.managers import SharedMemoryManager


            smm = SharedMemoryManager()
            smm.start()
            sl = smm.ShareableList(range(10))
            smm.shutdown()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)

        # Before bpo-36867 was fixed, a SharedMemoryManager not using the same
        # resource_tracker process as its parent would make the parent's
        # tracker complain about sl being leaked even though smm.shutdown()
        # properly released sl.
        self.assertFalse(err)
|
2019-02-24 00:08:16 -04:00
|
|
|
    def test_shared_memory_SharedMemoryManager_basics(self):
        """Lifecycle of segments/lists created through SharedMemoryManager."""
        smm1 = multiprocessing.managers.SharedMemoryManager()
        with self.assertRaises(ValueError):
            smm1.SharedMemory(size=9)  # Fails if SharedMemoryServer not started
        smm1.start()
        lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
        lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
        # Segments served by the manager are attachable by name from here.
        doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
        self.assertEqual(len(doppleganger_list0), 5)
        doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
        self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
        held_name = lom[0].name
        smm1.shutdown()
        if sys.platform != "win32":
            # Calls to unlink() have no effect on Windows platform; shared
            # memory will only be released once final process exits.
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_shm = shared_memory.SharedMemory(name=held_name)

        # Context-manager form: shutdown (and release) happens on exit.
        with multiprocessing.managers.SharedMemoryManager() as smm2:
            sl = smm2.ShareableList("howdy")
            shm = smm2.SharedMemory(size=128)
            held_name = sl.shm.name
        if sys.platform != "win32":
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_sl = shared_memory.ShareableList(name=held_name)
|
|
|
    def test_shared_memory_ShareableList_basics(self):
        """Exercise ShareableList: repr, indexing, format, copy, empty list."""
        sl = shared_memory.ShareableList(
            ['howdy', b'HoWdY', -273.154, 100, None, True, 42]
        )
        self.addCleanup(sl.shm.unlink)

        # Verify __repr__
        self.assertIn(sl.shm.name, str(sl))
        self.assertIn(str(list(sl)), str(sl))

        # Index Out of Range (get)
        with self.assertRaises(IndexError):
            sl[7]

        # Index Out of Range (set)
        with self.assertRaises(IndexError):
            sl[7] = 2

        # Assign value without format change (str -> str)
        current_format = sl._get_packing_format(0)
        sl[0] = 'howdy'
        self.assertEqual(current_format, sl._get_packing_format(0))

        # Verify attributes are readable.
        self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')

        # Exercise len().
        self.assertEqual(len(sl), 7)

        # Exercise index().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            with self.assertRaises(ValueError):
                sl.index('100')
            self.assertEqual(sl.index(100), 3)

        # Exercise retrieving individual values.
        self.assertEqual(sl[0], 'howdy')
        self.assertEqual(sl[-2], True)

        # Exercise iterability.
        self.assertEqual(
            tuple(sl),
            ('howdy', b'HoWdY', -273.154, 100, None, True, 42)
        )

        # Exercise modifying individual values.
        sl[3] = 42
        self.assertEqual(sl[3], 42)
        sl[4] = 'some'  # Change type at a given position.
        self.assertEqual(sl[4], 'some')
        self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[4] = 'far too many'
        self.assertEqual(sl[4], 'some')
        sl[0] = 'encodés'  # Exactly 8 bytes of UTF-8 data
        self.assertEqual(sl[0], 'encodés')
        self.assertEqual(sl[1], b'HoWdY')  # no spillage
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[0] = 'encodées'  # Exactly 9 bytes of UTF-8 data
        self.assertEqual(sl[1], b'HoWdY')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[1] = b'123456789'
        self.assertEqual(sl[1], b'HoWdY')

        # Exercise count().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            self.assertEqual(sl.count(42), 2)
            self.assertEqual(sl.count(b'HoWdY'), 1)
            self.assertEqual(sl.count(b'adios'), 0)

        # Exercise creating a duplicate.
        name_duplicate = self._new_shm_name('test03_duplicate')
        sl_copy = shared_memory.ShareableList(sl, name=name_duplicate)
        try:
            self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
            self.assertEqual(name_duplicate, sl_copy.shm.name)
            self.assertEqual(list(sl), list(sl_copy))
            self.assertEqual(sl.format, sl_copy.format)
            sl_copy[-1] = 77
            self.assertEqual(sl_copy[-1], 77)
            self.assertNotEqual(sl[-1], 77)
            sl_copy.shm.close()
        finally:
            sl_copy.shm.unlink()

        # Obtain a second handle on the same ShareableList.
        sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
        self.assertEqual(sl.shm.name, sl_tethered.shm.name)
        sl_tethered[-1] = 880
        self.assertEqual(sl[-1], 880)
        sl_tethered.shm.close()

        sl.shm.close()

        # Exercise creating an empty ShareableList.
        empty_sl = shared_memory.ShareableList()
        try:
            self.assertEqual(len(empty_sl), 0)
            self.assertEqual(empty_sl.format, '')
            self.assertEqual(empty_sl.count('any'), 0)
            with self.assertRaises(ValueError):
                empty_sl.index(None)
            empty_sl.shm.close()
        finally:
            empty_sl.shm.unlink()
|
|
|
    def test_shared_memory_ShareableList_pickling(self):
        """Pickled ShareableList attaches to the same segment (no data copy)."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                sl = shared_memory.ShareableList(range(10))
                self.addCleanup(sl.shm.unlink)

                serialized_sl = pickle.dumps(sl, protocol=proto)
                deserialized_sl = pickle.loads(serialized_sl)
                self.assertIsInstance(
                    deserialized_sl, shared_memory.ShareableList)
                self.assertEqual(deserialized_sl[-1], 9)
                self.assertIsNot(sl, deserialized_sl)

                # Writes through either handle are visible in both.
                deserialized_sl[4] = "changed"
                self.assertEqual(sl[4], "changed")
                sl[3] = "newvalue"
                self.assertEqual(deserialized_sl[3], "newvalue")

                # Pickle size must not scale with list contents (only the
                # name is serialized, not the data).
                larger_sl = shared_memory.ShareableList(range(400))
                self.addCleanup(larger_sl.shm.unlink)
                serialized_larger_sl = pickle.dumps(larger_sl, protocol=proto)
                self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
                larger_sl.shm.close()

                deserialized_sl.shm.close()
                sl.shm.close()
|
|
|
|
    def test_shared_memory_ShareableList_pickling_dead_object(self):
        """Unpickling after the backing segment is gone raises FileNotFoundError."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                sl = shared_memory.ShareableList(range(10))
                serialized_sl = pickle.dumps(sl, protocol=proto)

                # Now, we are going to kill the original object.
                # So, unpickled one won't be able to attach to it.
                sl.shm.close()
                sl.shm.unlink()

                with self.assertRaises(FileNotFoundError):
                    pickle.loads(serialized_sl)
2019-02-24 00:08:16 -04:00
|
|
|
|
2019-05-10 17:59:08 -03:00
|
|
|
    def test_shared_memory_cleaned_after_process_termination(self):
        """A segment held by an abruptly killed process must not leak."""
        cmd = '''if 1:
            import os, time, sys
            from multiprocessing import shared_memory

            # Create a shared_memory segment, and send the segment name
            sm = shared_memory.SharedMemory(create=True, size=10)
            sys.stdout.write(sm.name + '\\n')
            sys.stdout.flush()
            time.sleep(100)
        '''
        with subprocess.Popen([sys.executable, '-E', '-c', cmd],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            name = p.stdout.readline().strip().decode()

            # killing abruptly processes holding reference to a shared memory
            # segment should not leak the given memory segment.
            p.terminate()
            p.wait()

            # Poll until the resource tracker has removed the segment.
            err_msg = ("A SharedMemory segment was leaked after "
                       "a process was abruptly terminated")
            for _ in support.sleeping_retry(support.LONG_TIMEOUT, err_msg):
                try:
                    smm = shared_memory.SharedMemory(name, create=False)
                except FileNotFoundError:
                    break

            if os.name == 'posix':
                # Without this line it was raising warnings like:
                #   UserWarning: resource_tracker:
                #   There appear to be 1 leaked shared_memory
                #   objects to clean up at shutdown
                # See: https://bugs.python.org/issue45209
                resource_tracker.unregister(f"/{name}", "shared_memory")

                # A warning was emitted by the subprocess' own
                # resource_tracker (on Windows, shared memory segments
                # are released automatically by the OS).
                err = p.stderr.read().decode()
                self.assertIn(
                    "resource_tracker: There appear to be 1 leaked "
                    "shared_memory objects to clean up at shutdown", err)
|
2023-12-05 04:11:44 -04:00
|
|
|
    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_untracking(self):
        """track=False must keep the tracker from deleting the segment."""
        # gh-82300: When a separate Python process accesses shared memory
        # with track=False, it must not cause the memory to be deleted
        # when terminating.
        cmd = '''if 1:
            import sys
            from multiprocessing.shared_memory import SharedMemory
            mem = SharedMemory(create=False, name=sys.argv[1], track=False)
            mem.close()
        '''
        mem = shared_memory.SharedMemory(create=True, size=10)
        # The resource tracker shares pipes with the subprocess, and so
        # err existing means that the tracker process has terminated now.
        try:
            rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name)
            self.assertNotIn(b"resource_tracker", err)
            self.assertEqual(rc, 0)
            # The segment must still be attachable after the child exits.
            mem2 = shared_memory.SharedMemory(create=False, name=mem.name)
            mem2.close()
        finally:
            try:
                mem.unlink()
            except OSError:
                pass
            mem.close()
|
|
|
|
    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_tracking(self):
        """track=True must make the child's tracker reclaim the segment."""
        # gh-82300: When a separate Python process accesses shared memory
        # with track=True, it must cause the memory to be deleted when
        # terminating.
        cmd = '''if 1:
            import sys
            from multiprocessing.shared_memory import SharedMemory
            mem = SharedMemory(create=False, name=sys.argv[1], track=True)
            mem.close()
        '''
        mem = shared_memory.SharedMemory(create=True, size=10)
        try:
            rc, out, err = script_helper.assert_python_ok("-c", cmd, mem.name)
            self.assertEqual(rc, 0)
            # The child's tracker reports the segment as leaked and cleans it.
            self.assertIn(
                b"resource_tracker: There appear to be 1 leaked "
                b"shared_memory objects to clean up at shutdown", err)
        finally:
            try:
                mem.unlink()
            except OSError:
                pass
            resource_tracker.unregister(mem._name, "shared_memory")
            mem.close()
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
2021-10-01 07:45:59 -03:00
|
|
|
# Test to verify that `Finalize` works.
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
|
|
|
|
class _TestFinalize(BaseTestCase):
    """Tests for multiprocessing.util.Finalize ordering and thread-safety."""

    ALLOWED_TYPES = ('processes',)
|
|
|
|
|
2017-06-13 12:10:39 -03:00
|
|
|
def setUp(self):
|
|
|
|
self.registry_backup = util._finalizer_registry.copy()
|
|
|
|
util._finalizer_registry.clear()
|
|
|
|
|
|
|
|
    def tearDown(self):
        """Verify the test left the registry empty, then restore the backup."""
        gc.collect()  # For PyPy or other GCs.
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)
|
|
|
|
|
2010-11-02 20:50:11 -03:00
|
|
|
    @classmethod
    def _test_finalize(cls, conn):
        """Child-process body: register finalizers at varying priorities and
        report (via conn) the order in which their callbacks fire."""
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a           # triggers callback for a
        gc.collect()    # For PyPy or other GCs.

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()       # triggers callback for b
        close_b()       # does nothing because callback has already been called
        del b           # does nothing because callback has already been called
        gc.collect()    # For PyPy or other GCs.

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)
|
|
|
|
|
|
|
|
    def test_finalize(self):
        """Finalizers fire in priority order (higher first, ties LIFO)."""
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        # 'c' has no exitpriority so it never fires via _exit_function;
        # the rest arrive ordered by decreasing priority.
        result = [obj for obj in iter(conn.recv, 'STOP')]
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
|
|
|
|
|
2023-09-02 01:45:34 -03:00
|
|
|
@support.requires_resource('cpu')
|
2017-06-13 12:10:39 -03:00
|
|
|
def test_thread_safety(self):
|
|
|
|
# bpo-24484: _run_finalizers() should be thread-safe
|
|
|
|
def cb():
|
|
|
|
pass
|
|
|
|
|
|
|
|
class Foo(object):
|
|
|
|
def __init__(self):
|
|
|
|
self.ref = self # create reference cycle
|
|
|
|
# insert finalizer at random key
|
|
|
|
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
|
|
|
|
|
|
|
|
finish = False
|
|
|
|
exc = None
|
|
|
|
|
|
|
|
def run_finalizers():
|
|
|
|
nonlocal exc
|
|
|
|
while not finish:
|
|
|
|
time.sleep(random.random() * 1e-1)
|
|
|
|
try:
|
|
|
|
# A GC run will eventually happen during this,
|
|
|
|
# collecting stale Foo's and mutating the registry
|
|
|
|
util._run_finalizers()
|
|
|
|
except Exception as e:
|
|
|
|
exc = e
|
|
|
|
|
|
|
|
def make_finalizers():
|
|
|
|
nonlocal exc
|
|
|
|
d = {}
|
|
|
|
while not finish:
|
|
|
|
try:
|
|
|
|
# Old Foo's get gradually replaced and later
|
|
|
|
# collected by the GC (because of the cyclic ref)
|
|
|
|
d[random.getrandbits(5)] = {Foo() for i in range(10)}
|
|
|
|
except Exception as e:
|
|
|
|
exc = e
|
|
|
|
d.clear()
|
|
|
|
|
|
|
|
old_interval = sys.getswitchinterval()
|
|
|
|
old_threshold = gc.get_threshold()
|
|
|
|
try:
|
2024-04-05 17:57:36 -03:00
|
|
|
support.setswitchinterval(1e-6)
|
2017-06-13 12:10:39 -03:00
|
|
|
gc.set_threshold(5, 5, 5)
|
|
|
|
threads = [threading.Thread(target=run_finalizers),
|
|
|
|
threading.Thread(target=make_finalizers)]
|
2020-05-27 19:10:27 -03:00
|
|
|
with threading_helper.start_threads(threads):
|
2017-06-13 12:10:39 -03:00
|
|
|
time.sleep(4.0) # Wait a bit to trigger race condition
|
|
|
|
finish = True
|
|
|
|
if exc is not None:
|
|
|
|
raise exc
|
|
|
|
finally:
|
|
|
|
sys.setswitchinterval(old_interval)
|
|
|
|
gc.set_threshold(*old_threshold)
|
|
|
|
gc.collect() # Collect remaining Foo's
|
|
|
|
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
|
|
|
# Test that from ... import * works for each module
|
|
|
|
#
|
|
|
|
|
2013-08-14 11:35:41 -03:00
|
|
|
class _TestImportStar(unittest.TestCase):
    """Verify `from ... import *` works for each multiprocessing submodule."""

    def get_module_names(self):
        """Return the dotted names of all multiprocessing submodules."""
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        # glob.escape guards against special characters in the install path.
        pattern = os.path.join(glob.escape(folder), '*.py')
        files = glob.glob(pattern)
        modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
        modules = ['multiprocessing.' + m for m in modules]
        modules.remove('multiprocessing.__init__')
        modules.append('multiprocessing')
        return modules
|
2008-06-11 13:44:04 -03:00
|
|
|
|
|
|
|
    def test_import(self):
        """Every importable submodule defines __all__ and all names in it."""
        modules = self.get_module_names()
        # Drop modules that cannot be imported on this platform/build.
        if sys.platform == 'win32':
            modules.remove('multiprocessing.popen_fork')
            modules.remove('multiprocessing.popen_forkserver')
            modules.remove('multiprocessing.popen_spawn_posix')
        else:
            modules.remove('multiprocessing.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocessing.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocessing.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)

            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                    )
|
|
|
|
|
|
|
|
#
|
|
|
|
# Quick test that logging works -- does not test logging output
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestLogging(BaseTestCase):
    """Quick checks that multiprocessing's logging support works.

    Only level handling and record attribution are verified; the tests
    do not inspect full logging output formatting.
    """

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        # get_logger() must always return a usable logger object.
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
        # Both records are below the effective level and are discarded.
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Runs in a child process: report the effective log level back
        # to the parent over the pipe.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # An explicit level set on the multiprocessing logger is
        # inherited by child processes.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # With NOTSET the effective level falls back to the root logger.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        # Restore global logging state for the other tests.
        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)

    def test_filename(self):
        logger = multiprocessing.get_logger()
        original_level = logger.level
        try:
            logger.setLevel(util.DEBUG)
            stream = io.StringIO()
            handler = logging.StreamHandler(stream)
            logging_format = '[%(levelname)s] [%(filename)s] %(message)s'
            handler.setFormatter(logging.Formatter(logging_format))
            logger.addHandler(handler)
            logger.info('1')
            util.info('2')
            logger.debug('3')
            filename = os.path.basename(__file__)
            log_record = stream.getvalue()
            # Records must be attributed to this test file, whether
            # emitted through the logger directly or via the util
            # helper functions.  (Interpolate the computed filename;
            # previously the literal placeholder text was asserted.)
            self.assertIn(f'[INFO] [{filename}] 1', log_record)
            self.assertIn(f'[INFO] [{filename}] 2', log_record)
            self.assertIn(f'[DEBUG] [{filename}] 3', log_record)
        finally:
            logger.setLevel(original_level)
            logger.removeHandler(handler)
            handler.close()
|
|
|
|
|
2009-11-21 14:09:38 -04:00
|
|
|
|
2009-11-24 10:22:24 -04:00
|
|
|
# class _TestLoggingProcessName(BaseTestCase):
|
|
|
|
#
|
|
|
|
# def handle(self, record):
|
|
|
|
# assert record.processName == multiprocessing.current_process().name
|
|
|
|
# self.__handled = True
|
|
|
|
#
|
|
|
|
# def test_logging(self):
|
|
|
|
# handler = logging.Handler()
|
|
|
|
# handler.handle = self.handle
|
|
|
|
# self.__handled = False
|
|
|
|
# # Bypass getLogger() and side-effects
|
|
|
|
# logger = logging.getLoggerClass()(
|
|
|
|
# 'multiprocessing.test.TestLoggingProcessName')
|
|
|
|
# logger.addHandler(handler)
|
|
|
|
# logger.propagate = False
|
|
|
|
#
|
|
|
|
# logger.warn('foo')
|
|
|
|
# assert self.__handled
|
2009-11-21 14:09:38 -04:00
|
|
|
|
2013-02-26 08:39:57 -04:00
|
|
|
#
|
|
|
|
# Check that Process.join() retries if os.waitpid() fails with EINTR
|
|
|
|
#
|
|
|
|
|
|
|
|
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Give the parent a moment to block in join() before signalling.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        sigusr1_seen = [False]

        def _on_sigusr1(*unused):
            sigusr1_seen[0] = True

        my_pid = os.getpid()
        previous_handler = signal.signal(signal.SIGUSR1, _on_sigusr1)
        try:
            killer = self.Process(target=self._killer, args=(my_pid,))
            killer.start()
            try:
                # The sleeper outlives the signal; join() must survive
                # the EINTR caused by SIGUSR1 and still reap the child.
                sleeper = self.Process(target=time.sleep, args=(2,))
                sleeper.start()
                sleeper.join()
            finally:
                killer.join()
            self.assertTrue(sigusr1_seen[0])
            self.assertEqual(sleeper.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, previous_handler)
|
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
#
|
2009-01-19 12:23:53 -04:00
|
|
|
# Test to verify handle verification, see issue 3321
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestInvalidHandle(unittest.TestCase):
    """Handle-verification tests for Connection objects (see issue 3321)."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        bogus = multiprocessing.connection.Connection(44977608)
        try:
            # poll() on a bogus handle may raise, but must not crash.
            bogus.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            bogus._handle = None
        # A negative handle must be rejected at construction time.
        with self.assertRaises((ValueError, OSError)):
            multiprocessing.connection.Connection(-1)
|
Merged revisions 79297,79310,79382,79425-79427,79450 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r79297 | florent.xicluna | 2010-03-22 18:18:18 +0100 (lun, 22 mar 2010) | 2 lines
#7668: Fix test_httpservers failure when sys.executable contains non-ASCII bytes.
........
r79310 | florent.xicluna | 2010-03-22 23:52:11 +0100 (lun, 22 mar 2010) | 2 lines
Issue #8205: Remove the "Modules" directory from sys.path when Python is running from the build directory (POSIX only).
........
r79382 | florent.xicluna | 2010-03-24 20:33:25 +0100 (mer, 24 mar 2010) | 2 lines
Skip tests which depend on multiprocessing.sharedctypes, if _ctypes is not available.
........
r79425 | florent.xicluna | 2010-03-25 21:32:07 +0100 (jeu, 25 mar 2010) | 2 lines
Syntax cleanup `== None` -> `is None`
........
r79426 | florent.xicluna | 2010-03-25 21:33:49 +0100 (jeu, 25 mar 2010) | 2 lines
#8207: Fix test_pep277 on OS X
........
r79427 | florent.xicluna | 2010-03-25 21:39:10 +0100 (jeu, 25 mar 2010) | 2 lines
Fix test_unittest and test_warnings when running "python -Werror -m test.regrtest"
........
r79450 | florent.xicluna | 2010-03-26 20:32:44 +0100 (ven, 26 mar 2010) | 2 lines
Ensure that the failed or unexpected tests are sorted before printing.
........
2010-03-27 21:25:02 -03:00
|
|
|
|
2008-06-11 13:44:04 -03:00
|
|
|
|
2012-10-08 10:56:24 -03:00
|
|
|
|
2023-05-20 20:33:09 -03:00
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
|
2008-08-24 22:53:32 -03:00
|
|
|
class OtherTest(unittest.TestCase):
|
|
|
|
# TODO: add more tests for deliver/answer challenge.
|
|
|
|
def test_deliver_challenge_auth_failure(self):
|
|
|
|
class _FakeConnection(object):
|
|
|
|
def recv_bytes(self, size):
|
2008-08-25 00:05:54 -03:00
|
|
|
return b'something bogus'
|
2008-08-24 22:53:32 -03:00
|
|
|
def send_bytes(self, data):
|
|
|
|
pass
|
|
|
|
self.assertRaises(multiprocessing.AuthenticationError,
|
|
|
|
multiprocessing.connection.deliver_challenge,
|
|
|
|
_FakeConnection(), b'abc')
|
|
|
|
|
|
|
|
def test_answer_challenge_auth_failure(self):
|
|
|
|
class _FakeConnection(object):
|
|
|
|
def __init__(self):
|
|
|
|
self.count = 0
|
|
|
|
def recv_bytes(self, size):
|
|
|
|
self.count += 1
|
|
|
|
if self.count == 1:
|
2023-05-20 20:33:09 -03:00
|
|
|
return multiprocessing.connection._CHALLENGE
|
2008-08-24 22:53:32 -03:00
|
|
|
elif self.count == 2:
|
2008-08-25 00:05:54 -03:00
|
|
|
return b'something bogus'
|
|
|
|
return b''
|
2008-08-24 22:53:32 -03:00
|
|
|
def send_bytes(self, data):
|
|
|
|
pass
|
|
|
|
self.assertRaises(multiprocessing.AuthenticationError,
|
|
|
|
multiprocessing.connection.answer_challenge,
|
|
|
|
_FakeConnection(), b'abc')
|
|
|
|
|
2023-05-20 20:33:09 -03:00
|
|
|
|
|
|
|
@hashlib_helper.requires_hashdigest('md5')
|
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
|
|
|
|
class ChallengeResponseTest(unittest.TestCase):
|
|
|
|
authkey = b'supadupasecretkey'
|
|
|
|
|
|
|
|
def create_response(self, message):
|
|
|
|
return multiprocessing.connection._create_response(
|
|
|
|
self.authkey, message
|
|
|
|
)
|
|
|
|
|
|
|
|
def verify_challenge(self, message, response):
|
|
|
|
return multiprocessing.connection._verify_challenge(
|
|
|
|
self.authkey, message, response
|
|
|
|
)
|
|
|
|
|
|
|
|
def test_challengeresponse(self):
|
|
|
|
for algo in [None, "md5", "sha256"]:
|
|
|
|
with self.subTest(f"{algo=}"):
|
|
|
|
msg = b'is-twenty-bytes-long' # The length of a legacy message.
|
|
|
|
if algo:
|
|
|
|
prefix = b'{%s}' % algo.encode("ascii")
|
|
|
|
else:
|
|
|
|
prefix = b''
|
|
|
|
msg = prefix + msg
|
|
|
|
response = self.create_response(msg)
|
|
|
|
if not response.startswith(prefix):
|
|
|
|
self.fail(response)
|
|
|
|
self.verify_challenge(msg, response)
|
|
|
|
|
|
|
|
# TODO(gpshead): We need integration tests for handshakes between modern
|
|
|
|
# deliver_challenge() and verify_response() code and connections running a
|
|
|
|
# test-local copy of the legacy Python <=3.11 implementations.
|
|
|
|
|
|
|
|
# TODO(gpshead): properly annotate tests for requires_hashdigest rather than
|
|
|
|
# only running these on a platform supporting everything. otherwise logic
|
|
|
|
# issues preventing it from working on FIPS mode setups will be hidden.
|
|
|
|
|
Merged revisions 70912,70944,70968,71033,71041,71208,71263,71286,71395-71396,71405-71406,71485,71492,71494 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r70912 | georg.brandl | 2009-03-31 17:35:46 -0500 (Tue, 31 Mar 2009) | 1 line
#5617: add a handy function to print a unicode string to gdbinit.
........
r70944 | georg.brandl | 2009-03-31 23:32:39 -0500 (Tue, 31 Mar 2009) | 1 line
#5631: add upload to list of possible commands, which is presented in --help-commands.
........
r70968 | michael.foord | 2009-04-01 13:25:38 -0500 (Wed, 01 Apr 2009) | 1 line
Adding Wing project file
........
r71033 | brett.cannon | 2009-04-01 22:34:53 -0500 (Wed, 01 Apr 2009) | 3 lines
Fix two issues introduced by issue #71031 by changing the signature of
PyImport_AppendInittab() to take a const char *.
........
r71041 | jesse.noller | 2009-04-02 00:17:26 -0500 (Thu, 02 Apr 2009) | 1 line
Add custom initializer argument to multiprocess.Manager*, courtesy of lekma
........
r71208 | michael.foord | 2009-04-04 20:15:01 -0500 (Sat, 04 Apr 2009) | 4 lines
Change the way unittest.TestSuite use their tests to always access them through iteration. Non behavior changing, this allows you to create custom subclasses that override __iter__.
Issue #5693
........
r71263 | michael.foord | 2009-04-05 14:19:28 -0500 (Sun, 05 Apr 2009) | 4 lines
Adding assertIs and assertIsNot methods to unittest.TestCase
Issue #2578
........
r71286 | tarek.ziade | 2009-04-05 17:04:38 -0500 (Sun, 05 Apr 2009) | 1 line
added a simplest test to distutils.spawn._nt_quote_args
........
r71395 | benjamin.peterson | 2009-04-08 08:27:29 -0500 (Wed, 08 Apr 2009) | 1 line
these must be installed to correctly run tests
........
r71396 | benjamin.peterson | 2009-04-08 08:29:41 -0500 (Wed, 08 Apr 2009) | 1 line
fix syntax
........
r71405 | andrew.kuchling | 2009-04-09 06:22:47 -0500 (Thu, 09 Apr 2009) | 1 line
Add items
........
r71406 | andrew.kuchling | 2009-04-09 06:23:36 -0500 (Thu, 09 Apr 2009) | 1 line
Typo fixes
........
r71485 | andrew.kuchling | 2009-04-11 11:12:23 -0500 (Sat, 11 Apr 2009) | 1 line
Add various items
........
r71492 | georg.brandl | 2009-04-11 13:19:27 -0500 (Sat, 11 Apr 2009) | 1 line
Take credit for a patch of mine.
........
r71494 | benjamin.peterson | 2009-04-11 14:31:00 -0500 (Sat, 11 Apr 2009) | 1 line
ignore py3_test_grammar when compiling the library
........
2009-04-11 17:45:40 -03:00
|
|
|
#
|
|
|
|
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
|
|
|
|
#
|
|
|
|
|
|
|
|
def initializer(ns):
    """Bump the shared namespace counter to record that we ran."""
    ns.test = ns.test + 1
|
|
|
|
|
2023-05-20 20:33:09 -03:00
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
|
Merged revisions 70912,70944,70968,71033,71041,71208,71263,71286,71395-71396,71405-71406,71485,71492,71494 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r70912 | georg.brandl | 2009-03-31 17:35:46 -0500 (Tue, 31 Mar 2009) | 1 line
#5617: add a handy function to print a unicode string to gdbinit.
........
r70944 | georg.brandl | 2009-03-31 23:32:39 -0500 (Tue, 31 Mar 2009) | 1 line
#5631: add upload to list of possible commands, which is presented in --help-commands.
........
r70968 | michael.foord | 2009-04-01 13:25:38 -0500 (Wed, 01 Apr 2009) | 1 line
Adding Wing project file
........
r71033 | brett.cannon | 2009-04-01 22:34:53 -0500 (Wed, 01 Apr 2009) | 3 lines
Fix two issues introduced by issue #71031 by changing the signature of
PyImport_AppendInittab() to take a const char *.
........
r71041 | jesse.noller | 2009-04-02 00:17:26 -0500 (Thu, 02 Apr 2009) | 1 line
Add custom initializer argument to multiprocess.Manager*, courtesy of lekma
........
r71208 | michael.foord | 2009-04-04 20:15:01 -0500 (Sat, 04 Apr 2009) | 4 lines
Change the way unittest.TestSuite use their tests to always access them through iteration. Non behavior changing, this allows you to create custom subclasses that override __iter__.
Issue #5693
........
r71263 | michael.foord | 2009-04-05 14:19:28 -0500 (Sun, 05 Apr 2009) | 4 lines
Adding assertIs and assertIsNot methods to unittest.TestCase
Issue #2578
........
r71286 | tarek.ziade | 2009-04-05 17:04:38 -0500 (Sun, 05 Apr 2009) | 1 line
added a simplest test to distutils.spawn._nt_quote_args
........
r71395 | benjamin.peterson | 2009-04-08 08:27:29 -0500 (Wed, 08 Apr 2009) | 1 line
these must be installed to correctly run tests
........
r71396 | benjamin.peterson | 2009-04-08 08:29:41 -0500 (Wed, 08 Apr 2009) | 1 line
fix syntax
........
r71405 | andrew.kuchling | 2009-04-09 06:22:47 -0500 (Thu, 09 Apr 2009) | 1 line
Add items
........
r71406 | andrew.kuchling | 2009-04-09 06:23:36 -0500 (Thu, 09 Apr 2009) | 1 line
Typo fixes
........
r71485 | andrew.kuchling | 2009-04-11 11:12:23 -0500 (Sat, 11 Apr 2009) | 1 line
Add various items
........
r71492 | georg.brandl | 2009-04-11 13:19:27 -0500 (Sat, 11 Apr 2009) | 1 line
Take credit for a patch of mine.
........
r71494 | benjamin.peterson | 2009-04-11 14:31:00 -0500 (Sat, 11 Apr 2009) | 1 line
ignore py3_test_grammar when compiling the library
........
2009-04-11 17:45:40 -03:00
|
|
|
class TestInitializers(unittest.TestCase):
    """Test the Manager.start()/Pool.__init__() initializer feature (issue 5585)."""

    def setUp(self):
        # A fresh manager-backed namespace counter for each test.
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        manager = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        with self.assertRaises(TypeError):
            manager.start(1)
        manager.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        manager.shutdown()
        manager.join()

    def test_pool_initializer(self):
        # A non-callable initializer must be rejected.
        with self.assertRaises(TypeError):
            multiprocessing.Pool(initializer=1)
        pool = multiprocessing.Pool(1, initializer, (self.ns,))
        pool.close()
        pool.join()
        self.assertEqual(self.ns.test, 1)
|
|
|
|
|
2009-07-17 09:07:01 -03:00
|
|
|
#
|
|
|
|
# Issue 5155, 5313, 5331: Test process in processes
|
|
|
|
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
|
|
|
|
#
|
|
|
|
|
2013-09-29 13:29:56 -03:00
|
|
|
def _this_sub_process(q):
    """Pop one item from *q* without blocking; an empty queue is fine."""
    try:
        q.get(block=False)
    except pyqueue.Empty:
        pass
|
|
|
|
|
2017-07-24 21:40:55 -03:00
|
|
|
def _test_process():
    """Spawn a daemonic child that exercises a Queue, then wait for it."""
    q = multiprocessing.Queue()
    child = multiprocessing.Process(target=_this_sub_process, args=(q,))
    child.daemon = True
    child.start()
    child.join()
|
|
|
|
|
2009-07-17 09:07:01 -03:00
|
|
|
def _afunc(x):
    """Return the square of *x* (worker function for Pool.map tests)."""
    return x*x
|
|
|
|
|
|
|
|
def pool_in_process():
    """Run a Pool.map() from inside a child process (issues 5155/5313/5331)."""
    workers = multiprocessing.Pool(processes=4)
    results = workers.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    # close() (not terminate) so the workers finish their tasks cleanly.
    workers.close()
    workers.join()
|
2009-07-17 09:07:01 -03:00
|
|
|
|
|
|
|
class _file_like(object):
    """A write-only file-like object that buffers output per process.

    Each pid gets its own cache list so that a fork does not share a
    buffer between parent and child.
    """

    def __init__(self, delegate):
        self._delegate = delegate
        self._pid = None

    @property
    def cache(self):
        current = os.getpid()
        # There are no race conditions since fork keeps only the running
        # thread; a pid change means we are in a fresh process and start
        # with an empty buffer.
        if current != self._pid:
            self._pid = current
            self._cache = []
        return self._cache

    def write(self, data):
        self.cache.append(data)

    def flush(self):
        # Forward the buffered text to the delegate in one call.
        self._delegate.write(''.join(self.cache))
        self._cache = []
|
|
|
|
|
|
|
|
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: process-in-process and stdin handling.

    Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior.
    """

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): proc is created but intentionally never started;
        # constructing the Process pickles/captures flike, which is part
        # of what this regression test exercises.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        # Use assertEqual instead of a bare assert so the check is not
        # stripped when running under python -O.
        self.assertEqual(sio.getvalue(), 'foo')
|
|
|
|
|
2012-03-05 14:28:37 -04:00
|
|
|
|
|
|
|
class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait()."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child process: send ten (index, pid) tuples over the pipe.
        # With slow=True, sleep a random amount between sends so the
        # parent's wait() loop sees readers become ready at different
        # times.
        for i in range(10):
            if slow:
                time.sleep(random.random() * 0.100)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        # Start four writers, keeping only the read ends in the parent.
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            # Close the parent's copy of the write end so EOF is seen
            # once the child closes its own copy.
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        # Drain every reader; EOFError marks a finished child and
        # removes its reader, terminating the loop when all are done.
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        # Order of arrival is nondeterministic; compare sorted.
        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Socket variant of _child_test_wait: send ten newline-separated
        # decimal strings to the parent's listening socket.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random() * 0.100)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        from multiprocessing.connection import wait
        l = socket.create_server((socket_helper.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        # Start four socket writers.
        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        # Accept one connection per child, then stop listening.
        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        # Collect bytes per connection; an empty recv() marks EOF.
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        # Each connection must have delivered the full sequence intact.
        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        # Re-run test_wait with randomized delays between sends.
        self.test_wait(True)

    def test_wait_socket_slow(self):
        # Re-run test_wait_socket with randomized delays between sends.
        self.test_wait_socket(True)

    @support.requires_resource('walltime')
    def test_wait_timeout(self):
        from multiprocessing.connection import wait

        timeout = 5.0  # seconds
        a, b = multiprocessing.Pipe()

        # Nothing is ready: wait() must block for roughly the full
        # timeout and return an empty list.
        start = time.monotonic()
        res = wait([a, b], timeout)
        delta = time.monotonic() - start

        self.assertEqual(res, [])
        # Allow for the clock's resolution when checking the elapsed time.
        self.assertGreater(delta, timeout - CLOCK_RES)

        # Once data is pending on a, wait() must report it promptly.
        b.send(None)
        res = wait([a, b], 20)
        self.assertEqual(res, [a])

    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: signal readiness via the semaphore, then stay alive
        # for `period` seconds so the parent can watch the sentinel.
        sem.release()
        time.sleep(period)

    @support.requires_resource('walltime')
    def test_wait_integer(self):
        from multiprocessing.connection import wait

        expected = 3
        # Sort by object identity so heterogeneous objects can be compared.
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))

        p.start()
        # Process sentinels are plain integer handles/fds.
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))

        # Only the sentinel becomes ready, when the child exits after
        # roughly `expected` seconds.
        start = time.monotonic()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)

        # Data pending on b (sent via a) plus the dead child's sentinel.
        a.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)

        # Now everything is ready.
        b.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)

        p.terminate()
        p.join()

    def test_neg_timeout(self):
        # A negative timeout must behave like a zero timeout: return
        # immediately with no ready objects.
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        t = time.monotonic()
        res = wait([a], timeout=-1)
        t = time.monotonic() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
|
2012-03-05 14:28:37 -04:00
|
|
|
|
2012-04-01 12:19:09 -03:00
|
|
|
#
|
|
|
|
# Issue 14151: Test invalid family on invalid environment
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: an address family invalid for the platform must raise."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A Windows named-pipe address is not valid on POSIX.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A POSIX filesystem path is not a valid pipe address on Windows.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, '/var/test.pipe')
|
2012-04-01 12:25:49 -03:00
|
|
|
|
2012-05-18 10:28:02 -03:00
|
|
|
#
|
|
|
|
# Issue 12098: check sys.flags of child matches that for parent
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestFlags(unittest.TestCase):
    """Issue 12098: check sys.flags of a child matches those of its parent."""

    @classmethod
    def run_in_grandchild(cls, conn):
        # Deepest process: report our interpreter flags to the child.
        conn.send(tuple(sys.flags))

    @classmethod
    def run_in_child(cls, start_method):
        # Middle process (started via subprocess with unusual flags):
        # spawn a grandchild with the requested start method and print
        # both flag tuples as JSON for the top-level test to compare.
        import json
        mp = multiprocessing.get_context(start_method)
        r, w = mp.Pipe(duplex=False)
        p = mp.Process(target=cls.run_in_grandchild, args=(w,))
        # Suppress the DeprecationWarning that starting a process may
        # emit here (e.g. fork-in-threaded-process deprecation).
        with warnings.catch_warnings(category=DeprecationWarning):
            p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        print(json.dumps(flags))

    def test_flags(self):
        import json
        # start child process using unusual flags
        prog = (
            'from test._test_multiprocessing import TestFlags; '
            f'TestFlags.run_in_child({multiprocessing.get_start_method()!r})'
        )
        # -E -S -O are the "unusual" flags that must propagate from the
        # child to the grandchild.
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
|
|
|
|
|
2012-07-27 10:19:00 -03:00
|
|
|
#
|
|
|
|
# Test interaction with socket timeouts - see Issue #6056
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestTimeouts(unittest.TestCase):
    """Test interaction with socket timeouts - see Issue #6056."""

    @classmethod
    def _test_timeout(cls, child, address):
        # Child process: wait long enough for the parent to block, then
        # talk over the pipe and connect back to the parent's listener.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()

    def test_timeout(self):
        # A small global socket timeout must not break multiprocessing's
        # blocking connection operations.
        old_timeout = socket.getdefaulttimeout()
        try:
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            # Close our copy of the child end so EOF semantics work.
            child.close()
            # recv() blocks ~1s here, well past the 0.1s default timeout.
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            # Always restore the global default timeout.
            socket.setdefaulttimeout(old_timeout)
|
|
|
|
|
2012-08-14 07:41:32 -03:00
|
|
|
#
|
|
|
|
# Test what happens with no "if __name__ == '__main__'"
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestNoForkBomb(unittest.TestCase):
    """Run a script with no ``if __name__ == '__main__'`` guard.

    Under 'fork' the script works; under 'spawn'/'forkserver' it must
    fail with RuntimeError instead of forking endlessly.
    """

    def test_noforkbomb(self):
        start_method = multiprocessing.get_start_method()
        script = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if start_method != 'fork':
            rc, out, err = test.support.script_helper.assert_python_failure(
                script, start_method)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            rc, out, err = test.support.script_helper.assert_python_ok(
                script, start_method)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
|
2012-08-14 07:41:32 -03:00
|
|
|
|
2013-04-17 16:58:00 -03:00
|
|
|
#
|
|
|
|
# Issue #17555: ForkAwareThreadLock
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock.  The size of the registry at generation n was ~2**n.

    @classmethod
    def child(cls, n, conn):
        if n > 1:
            # Recurse: each generation starts the next one down.
            proc = multiprocessing.Process(target=cls.child,
                                           args=(n-1, conn))
            proc.start()
            conn.close()
            join_process(proc)
        else:
            # Innermost generation reports the registry size.
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        # Create a lock so it is present in the after-fork registry
        # before we measure the baseline size.
        lock = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        proc = multiprocessing.Process(target=self.child, args=(5, w))
        proc.start()
        w.close()
        new_size = r.recv()
        join_process(proc)
        # Five generations later the registry must not have grown.
        self.assertLessEqual(new_size, old_size)
|
|
|
|
|
2013-08-14 11:35:41 -03:00
|
|
|
#
|
|
|
|
# Check that non-forked child processes do not inherit unneeded fds/handles
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestCloseFds(unittest.TestCase):
    """Check that non-forked child processes do not inherit unneeded
    fds/handles."""

    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            temporaries = []
            while fd < 50:
                temporaries.append(fd)
                fd = os.dup(fd)
            for dup_fd in temporaries:
                os.close(dup_fd)
            return fd

    def close(self, fd):
        if WIN32:
            # Re-wrap the handle in a socket object so it is closed
            # through the Winsock layer.
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child side: report the exception (or None) raised when trying
        # to adopt the inherited fd.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')

        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            proc = multiprocessing.Process(target=self._test_closefds,
                                           args=(writer, fd))
            proc.start()
            writer.close()
            e = reader.recv()
            join_process(proc)
        finally:
            self.close(fd)
            writer.close()
            reader.close()

        if multiprocessing.get_start_method() == 'fork':
            # With fork every fd is inherited, so fromfd() succeeds.
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
|
|
|
|
|
2013-07-01 14:59:26 -03:00
|
|
|
#
|
|
|
|
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
|
|
|
|
#
|
|
|
|
|
|
|
|
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(),
    accept() etc."""

    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)

    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        echoed = conn.recv()
        conn.send(echoed)
        # This send is large enough to block until the parent reads.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            proc = multiprocessing.Process(target=self._test_ignore,
                                           args=(child_conn,))
            proc.daemon = True
            proc.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            # Interrupt the child while it blocks in recv(); the call
            # must be retried, not fail with EINTR.
            time.sleep(0.1)
            os.kill(proc.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            # Interrupt the child while it blocks in a huge send().
            time.sleep(0.1)
            os.kill(proc.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            proc.join()
        finally:
            conn.close()

    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as listener:
            conn.send(listener.address)
            accepted = listener.accept()
            accepted.send('welcome')

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            proc = multiprocessing.Process(target=self._test_ignore_listener,
                                           args=(child_conn,))
            proc.daemon = True
            proc.start()
            child_conn.close()
            address = conn.recv()
            # Interrupt the child while it blocks in accept().
            time.sleep(0.1)
            os.kill(proc.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            proc.join()
        finally:
            conn.close()
|
|
|
|
|
2013-08-14 11:35:41 -03:00
|
|
|
class TestStartMethod(unittest.TestCase):
    """Tests for selecting, querying and mixing process start methods."""

    @classmethod
    def _check_context(cls, conn):
        # Child side: report the start method actually in effect.
        conn.send(multiprocessing.get_start_method())

    def check_context(self, ctx):
        # Start a child under `ctx` and verify it sees ctx's start method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method not supported on this platform.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_context_check_module_types(self):
        try:
            ctx = multiprocessing.get_context('forkserver')
        except ValueError:
            raise unittest.SkipTest('forkserver should be available')
        with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'):
            ctx.set_forkserver_preload([1, 2, 3])

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            # Always restore the process-global start method.
            multiprocessing.set_start_method(old_method, force=True)
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            # Order depends on the platform default; forkserver may be
            # absent.
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")

    @unittest.skipIf(sys.platform == "win32",
                     "Only Spawn on windows so no risk of mixing")
    @only_run_in_spawn_testsuite("avoids redundant testing.")
    def test_mixed_startmethod(self):
        # Fork-based locks cannot be used with spawned process
        for process_method in ["spawn", "forkserver"]:
            queue = multiprocessing.get_context("fork").Queue()
            process_ctx = multiprocessing.get_context(process_method)
            p = process_ctx.Process(target=close_queue, args=(queue,))
            err_msg = "A SemLock created in a fork"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                p.start()

        # non-fork-based locks can be used with all other start methods
        for queue_method in ["spawn", "forkserver"]:
            for process_method in multiprocessing.get_all_start_methods():
                queue = multiprocessing.get_context(queue_method).Queue()
                process_ctx = multiprocessing.get_context(process_method)
                p = process_ctx.Process(target=close_queue, args=(queue,))
                p.start()
                p.join()

    @classmethod
    def _put_one_in_queue(cls, queue):
        queue.put(1)

    @classmethod
    def _put_two_and_nest_once(cls, queue):
        queue.put(2)
        process = multiprocessing.Process(target=cls._put_one_in_queue, args=(queue,))
        process.start()
        process.join()

    def test_nested_startmethod(self):
        # gh-108520: Regression test to ensure that child process can send its
        # arguments to another process
        queue = multiprocessing.Queue()

        process = multiprocessing.Process(target=self._put_two_and_nest_once, args=(queue,))
        process.start()
        process.join()

        # Fetch exactly the two expected items.  Draining with
        # `while not queue.empty()` is unreliable: empty() may return
        # True while the feeder's data is still in flight, silently
        # dropping items and making the assertion below flaky.
        results = [queue.get(timeout=support.SHORT_TIMEOUT)
                   for _ in range(2)]

        # gh-109706: queue.put(1) can write into the queue before queue.put(2),
        # there is no synchronization in the test.
        self.assertSetEqual(set(results), set([2, 1]))
|
2023-08-30 14:07:41 -03:00
|
|
|
|
2016-12-10 12:13:16 -04:00
|
|
|
|
2013-08-14 11:35:41 -03:00
|
|
|
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
    """Tests for the resource_tracker helper process."""

    def test_resource_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        cmd = '''if 1:
            import time, os
            import multiprocessing as mp
            from multiprocessing import resource_tracker
            from multiprocessing.shared_memory import SharedMemory

            mp.set_start_method("spawn")

            def create_and_register_resource(rtype):
                if rtype == "semaphore":
                    lock = mp.Lock()
                    return lock, lock._semlock.name
                elif rtype == "shared_memory":
                    sm = SharedMemory(create=True, size=10)
                    return sm, sm._name
                else:
                    raise ValueError(
                        "Resource type {{}} not understood".format(rtype))

            resource1, rname1 = create_and_register_resource("{rtype}")
            resource2, rname2 = create_and_register_resource("{rtype}")

            os.write({w}, rname1.encode("ascii") + b"\\n")
            os.write({w}, rname2.encode("ascii") + b"\\n")

            time.sleep(10)
        '''
        for rtype in resource_tracker._CLEANUP_FUNCS:
            with self.subTest(rtype=rtype):
                if rtype in ("noop", "dummy"):
                    # Artefact resource type used by the resource_tracker
                    # or tests
                    continue
                r, w = os.pipe()
                p = subprocess.Popen([sys.executable,
                                     '-E', '-c', cmd.format(w=w, rtype=rtype)],
                                     pass_fds=[w],
                                     stderr=subprocess.PIPE)
                os.close(w)
                with open(r, 'rb', closefd=True) as f:
                    name1 = f.readline().rstrip().decode('ascii')
                    name2 = f.readline().rstrip().decode('ascii')
                # Unlink the first resource ourselves, then kill the
                # child; the tracker must clean up the second one.
                _resource_unlink(name1, rtype)
                p.terminate()
                p.wait()

                err_msg = (f"A {rtype} resource was leaked after a process was "
                           f"abruptly terminated")
                for _ in support.sleeping_retry(support.SHORT_TIMEOUT,
                                                err_msg):
                    try:
                        _resource_unlink(name2, rtype)
                    except OSError as e:
                        # docs say it should be ENOENT, but OSX seems to give
                        # EINVAL
                        self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
                        break

                err = p.stderr.read().decode('utf-8')
                p.stderr.close()
                expected = ('resource_tracker: There appear to be 2 leaked {} '
                            'objects'.format(
                            rtype))
                self.assertRegex(err, expected)
                self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)

    def check_resource_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.resource_tracker import _resource_tracker
        pid = _resource_tracker._pid
        if pid is not None:
            # Start from a known state: kill any running tracker and
            # spawn a fresh one.
            os.kill(pid, signal.SIGKILL)
            support.wait_process(pid, exitcode=-signal.SIGKILL)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _resource_tracker.ensure_running()
        pid = _resource_tracker._pid

        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die

        ctx = multiprocessing.get_context("spawn")
        with warnings.catch_warnings(record=True) as all_warn:
            warnings.simplefilter("always")
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())
            if should_die:
                self.assertEqual(len(all_warn), 1)
                the_warn = all_warn[0]
                self.assertTrue(issubclass(the_warn.category, UserWarning))
                self.assertTrue("resource_tracker: process died"
                                in str(the_warn.message))
            else:
                self.assertEqual(len(all_warn), 0)

    def test_resource_tracker_sigint(self):
        # Catchable signal (ignored by the tracker): it must survive.
        self.check_resource_tracker_death(signal.SIGINT, False)

    def test_resource_tracker_sigterm(self):
        # Catchable signal (ignored by the tracker): it must survive.
        self.check_resource_tracker_death(signal.SIGTERM, False)

    def test_resource_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_resource_tracker_death(signal.SIGKILL, True)

    @staticmethod
    def _is_resource_tracker_reused(conn, pid):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        # The pid should be None in the child process, expect for the fork
        # context. It should not be a new value.
        reused = _resource_tracker._pid in (None, pid)
        reused &= _resource_tracker._check_alive()
        conn.send(reused)

    def test_resource_tracker_reused(self):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        pid = _resource_tracker._pid

        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=self._is_resource_tracker_reused,
                                    args=(w, pid))
        p.start()
        is_resource_tracker_reused = r.recv()

        # Clean up
        p.join()
        w.close()
        r.close()

        self.assertTrue(is_resource_tracker_reused)

    def test_too_long_name_resource(self):
        # gh-96819: Resource names that will make the length of a write to a pipe
        # greater than PIPE_BUF are not allowed
        rtype = "shared_memory"
        too_long_name_resource = "a" * (512 - len(rtype))
        with self.assertRaises(ValueError):
            resource_tracker.register(too_long_name_resource, rtype)

    def _test_resource_tracker_leak_resources(self, cleanup):
        # We use a separate instance for testing, since the main global
        # _resource_tracker may be used to watch test infrastructure.
        from multiprocessing.resource_tracker import ResourceTracker
        tracker = ResourceTracker()
        tracker.ensure_running()
        self.assertTrue(tracker._check_alive())

        self.assertIsNone(tracker._exitcode)
        tracker.register('somename', 'dummy')
        if cleanup:
            tracker.unregister('somename', 'dummy')
            expected_exit_code = 0
        else:
            expected_exit_code = 1

        self.assertTrue(tracker._check_alive())
        self.assertIsNone(tracker._exitcode)
        tracker._stop()
        self.assertEqual(tracker._exitcode, expected_exit_code)

    def test_resource_tracker_exit_code(self):
        """
        Test the exit code of the resource tracker.

        If no leaked resources were found, exit code should be 0, otherwise 1
        """
        for cleanup in [True, False]:
            with self.subTest(cleanup=cleanup):
                self._test_resource_tracker_leak_resources(
                    cleanup=cleanup,
                )
|
2017-11-03 10:31:38 -03:00
|
|
|
|
2017-05-17 10:04:00 -03:00
|
|
|
class TestSimpleQueue(unittest.TestCase):
    """Behavioral tests for multiprocessing.SimpleQueue."""

    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()

    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()

        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()

        self.assertTrue(queue.empty())

        child_can_start.set()
        parent_can_continue.wait()

        # The child sampled empty() once before and once after its
        # first put, so the queue now holds True then False.
        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())

        proc.join()

    def test_close(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        # closing a queue twice should not fail
        queue.close()

    # Test specific to CPython since it tests private attributes
    @test.support.cpython_only
    def test_closed(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        self.assertTrue(queue._reader.closed)
        self.assertTrue(queue._writer.closed)
|
|
|
|
|
2018-07-11 07:22:28 -03:00
|
|
|
|
2018-11-04 18:40:32 -04:00
|
|
|
class TestPoolNotLeakOnFailure(unittest.TestCase):

    def test_release_unused_processes(self):
        # Issue #19675: During pool creation, if we can't create a process,
        # don't leak already created ones.
        will_fail_in = 3
        forked_processes = []

        class FailingForkProcess:
            """Fake Process whose start() fails after a few successes."""

            def __init__(self, **kwargs):
                self.name = 'Fake Process'
                self.exitcode = None
                self.state = None
                forked_processes.append(self)

            def start(self):
                nonlocal will_fail_in
                if will_fail_in <= 0:
                    raise OSError("Manually induced OSError")
                will_fail_in -= 1
                self.state = 'started'

            def terminate(self):
                self.state = 'stopping'

            def join(self):
                if self.state == 'stopping':
                    self.state = 'stopped'

            def is_alive(self):
                return self.state in ('started', 'stopping')

        with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
            p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
                Process=FailingForkProcess))
            p.close()
            p.join()
        # Every fake process created before the failure must have been
        # terminated and joined by the failed Pool constructor.
        self.assertFalse(
            any(process.is_alive() for process in forked_processes))
|
|
|
|
|
|
|
|
|
2023-05-20 20:33:09 -03:00
|
|
|
@hashlib_helper.requires_hashdigest('sha256')
|
2019-02-07 07:03:11 -04:00
|
|
|
class TestSyncManagerTypes(unittest.TestCase):
|
|
|
|
"""Test all the types which can be shared between a parent and a
|
|
|
|
child process by using a manager which acts as an intermediary
|
|
|
|
between them.
|
|
|
|
|
|
|
|
In the following unit-tests the base type is created in the parent
|
|
|
|
process, the @classmethod represents the worker process and the
|
|
|
|
shared object is readable and editable between the two.
|
|
|
|
|
|
|
|
# The child.
|
|
|
|
@classmethod
|
|
|
|
def _test_list(cls, obj):
|
|
|
|
assert obj[0] == 5
|
|
|
|
assert obj.append(6)
|
|
|
|
|
|
|
|
# The parent.
|
|
|
|
def test_list(self):
|
|
|
|
o = self.manager.list()
|
|
|
|
o.append(5)
|
|
|
|
self.run_worker(self._test_list, o)
|
|
|
|
assert o[1] == 6
|
|
|
|
"""
|
|
|
|
manager_class = multiprocessing.managers.SyncManager
|
|
|
|
|
|
|
|
def setUp(self):
    # One fresh manager per test; run_worker() stores the worker
    # process in self.proc so tearDown() can reap it.
    self.manager = self.manager_class()
    self.manager.start()
    self.proc = None
|
|
|
|
|
|
|
|
def tearDown(self):
    # Kill any still-running worker before shutting the manager down.
    if self.proc is not None and self.proc.is_alive():
        self.proc.terminate()
        self.proc.join()
    self.manager.shutdown()
    self.manager = None
    self.proc = None
|
2019-02-07 07:03:11 -04:00
|
|
|
|
|
|
|
@classmethod
def setUpClass(cls):
    # Make sure no stray children from earlier tests skew the
    # active_children() accounting done by wait_proc_exit().
    support.reap_children()

tearDownClass = setUpClass
|
|
|
|
|
|
|
|
def wait_proc_exit(self):
    # Only the manager process should be returned by active_children()
    # but this can take a bit on slow machines, so wait a few seconds
    # if there are other children too (see #17395).
    join_process(self.proc)

    timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT
    start_time = time.monotonic()
    for _ in support.sleeping_retry(timeout, error=False):
        if len(multiprocessing.active_children()) <= 1:
            break
    else:
        # Timed out: report lingering children but do not fail here.
        dt = time.monotonic() - start_time
        support.environment_altered = True
        support.print_warning(f"multiprocessing.Manager still has "
                              f"{multiprocessing.active_children()} "
                              f"active children after {dt:.1f} seconds")
|
2019-02-07 07:03:11 -04:00
|
|
|
|
|
|
|
def run_worker(self, worker, obj):
    # Run worker(obj) in a daemonic child and require a clean exit.
    self.proc = multiprocessing.Process(target=worker, args=(obj, ))
    self.proc.daemon = True
    self.proc.start()
    self.wait_proc_exit()
    self.assertEqual(self.proc.exitcode, 0)
|
|
|
|
|
|
|
|
@classmethod
def _test_event(cls, obj):
    # Child side: the event arrives set, then is cleared here.
    assert obj.is_set()
    obj.wait()
    obj.clear()
    obj.wait(0.001)
|
|
|
|
|
|
|
|
def test_event(self):
    o = self.manager.Event()
    o.set()
    self.run_worker(self._test_event, o)
    # The worker cleared the event; it must stay cleared here.
    assert not o.is_set()
    o.wait(0.001)
|
|
|
|
|
|
|
|
@classmethod
def _test_lock(cls, obj):
    # Child acquires; the parent releases after the worker exits.
    obj.acquire()
|
|
|
|
|
|
|
|
def test_lock(self, lname="Lock"):
    o = getattr(self.manager, lname)()
    self.run_worker(self._test_lock, o)
    o.release()
    self.assertRaises(RuntimeError, o.release)  # already released
|
|
|
|
|
|
|
|
@classmethod
def _test_rlock(cls, obj):
    # Child side: one balanced acquire/release pair.
    obj.acquire()
    obj.release()
|
|
|
|
|
|
|
|
def test_rlock(self, lname="RLock"):
    # Exercise the manager's RLock proxy.  The previous default of
    # "Lock" meant this test never actually constructed an RLock.
    o = getattr(self.manager, lname)()
    self.run_worker(self._test_rlock, o)
|
|
|
|
|
|
|
|
@classmethod
def _test_semaphore(cls, obj):
    # Child acquires; the parent releases after the worker exits.
    obj.acquire()
|
|
|
|
|
|
|
|
def test_semaphore(self, sname="Semaphore"):
    o = getattr(self.manager, sname)()
    self.run_worker(self._test_semaphore, o)
    o.release()
|
|
|
|
|
|
|
|
def test_bounded_semaphore(self):
    # Same protocol as Semaphore, different proxy type.
    self.test_semaphore(sname="BoundedSemaphore")
|
|
|
|
|
|
|
|
@classmethod
def _test_condition(cls, obj):
    # Child side: the condition's lock can be taken and dropped.
    obj.acquire()
    obj.release()
|
|
|
|
|
|
|
|
def test_condition(self):
    o = self.manager.Condition()
    self.run_worker(self._test_condition, o)
|
|
|
|
|
|
|
|
@classmethod
def _test_barrier(cls, obj):
    # Child side: the proxied barrier exposes its party count.
    assert obj.parties == 5
    obj.reset()
|
|
|
|
|
|
|
|
def test_barrier(self):
    o = self.manager.Barrier(5)
    self.run_worker(self._test_barrier, o)
|
|
|
|
|
|
|
|
@classmethod
def _test_pool(cls, obj):
    # TODO: fix https://bugs.python.org/issue35919
    with obj:
        pass
|
|
|
|
|
|
|
|
def test_pool(self):
    o = self.manager.Pool(processes=4)
    self.run_worker(self._test_pool, o)
|
|
|
|
|
2019-02-24 00:08:16 -04:00
|
|
|
@classmethod
def _test_queue(cls, obj):
    # Child side: the parent enqueued 5 then 6 into a maxsize-2 queue.
    assert obj.qsize() == 2
    assert obj.full()
    assert not obj.empty()
    assert obj.get() == 5
    assert not obj.empty()
    assert obj.get() == 6
    assert obj.empty()
|
|
|
|
|
|
|
|
def test_queue(self, qname="Queue"):
    o = getattr(self.manager, qname)(2)
    o.put(5)
    o.put(6)
    self.run_worker(self._test_queue, o)
    # The worker drained both items.
    assert o.empty()
    assert not o.full()
|
|
|
|
|
|
|
|
def test_joinable_queue(self):
    # Same protocol as Queue, different proxy type.
    self.test_queue("JoinableQueue")
|
|
|
|
|
2019-02-07 07:03:11 -04:00
|
|
|
@classmethod
def _test_list(cls, obj):
    # Child side: the parent appended a single 5.
    case = unittest.TestCase()
    case.assertEqual(obj[0], 5)
    case.assertEqual(obj.count(5), 1)
    case.assertEqual(obj.index(5), 0)
    obj.sort()
    obj.reverse()
    # Iteration over the proxy must work.
    for x in obj:
        pass
    case.assertEqual(len(obj), 1)
    case.assertEqual(obj.pop(0), 5)
|
2019-02-07 07:03:11 -04:00
|
|
|
|
|
|
|
def test_list(self):
|
|
|
|
o = self.manager.list()
|
|
|
|
o.append(5)
|
|
|
|
self.run_worker(self._test_list, o)
|
2022-10-19 07:07:07 -03:00
|
|
|
self.assertIsNotNone(o)
|
2019-02-07 07:03:11 -04:00
|
|
|
self.assertEqual(len(o), 0)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_dict(cls, obj):
|
2022-10-19 07:07:07 -03:00
|
|
|
case = unittest.TestCase()
|
|
|
|
case.assertEqual(len(obj), 1)
|
|
|
|
case.assertEqual(obj['foo'], 5)
|
|
|
|
case.assertEqual(obj.get('foo'), 5)
|
|
|
|
case.assertListEqual(list(obj.items()), [('foo', 5)])
|
|
|
|
case.assertListEqual(list(obj.keys()), ['foo'])
|
|
|
|
case.assertListEqual(list(obj.values()), [5])
|
|
|
|
case.assertDictEqual(obj.copy(), {'foo': 5})
|
|
|
|
case.assertTupleEqual(obj.popitem(), ('foo', 5))
|
2019-02-07 07:03:11 -04:00
|
|
|
|
|
|
|
def test_dict(self):
|
|
|
|
o = self.manager.dict()
|
|
|
|
o['foo'] = 5
|
|
|
|
self.run_worker(self._test_dict, o)
|
2022-10-19 07:07:07 -03:00
|
|
|
self.assertIsNotNone(o)
|
2019-02-07 07:03:11 -04:00
|
|
|
self.assertEqual(len(o), 0)
|
|
|
|
|
|
|
|
    @classmethod
    def _test_value(cls, obj):
        # Worker side: the parent created the Value proxy with value 1.
        case = unittest.TestCase()
        case.assertEqual(obj.value, 1)
        case.assertEqual(obj.get(), 1)
        # Mutate so the parent can observe the change across processes.
        obj.set(2)
def test_value(self):
|
|
|
|
o = self.manager.Value('i', 1)
|
|
|
|
self.run_worker(self._test_value, o)
|
|
|
|
self.assertEqual(o.value, 2)
|
|
|
|
self.assertEqual(o.get(), 2)
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def _test_array(cls, obj):
|
2022-10-19 07:07:07 -03:00
|
|
|
case = unittest.TestCase()
|
|
|
|
case.assertEqual(obj[0], 0)
|
|
|
|
case.assertEqual(obj[1], 1)
|
|
|
|
case.assertEqual(len(obj), 2)
|
|
|
|
case.assertListEqual(list(obj), [0, 1])
|
2019-02-07 07:03:11 -04:00
|
|
|
|
|
|
|
def test_array(self):
|
|
|
|
o = self.manager.Array('i', [0, 1])
|
|
|
|
self.run_worker(self._test_array, o)
|
|
|
|
|
|
|
|
    @classmethod
    def _test_namespace(cls, obj):
        # Worker side: the parent set x=0 and y=1 on the namespace proxy.
        case = unittest.TestCase()
        case.assertEqual(obj.x, 0)
        case.assertEqual(obj.y, 1)
def test_namespace(self):
|
|
|
|
o = self.manager.Namespace()
|
|
|
|
o.x = 0
|
|
|
|
o.y = 1
|
|
|
|
self.run_worker(self._test_namespace, o)
|
|
|
|
|
|
|
|
|
2022-06-09 13:55:12 -03:00
|
|
|
class TestNamedResource(unittest.TestCase):
    """Check that named system resources (semaphores) are cleaned up."""

    @only_run_in_spawn_testsuite("spawn specific test.")
    def test_global_named_resource_spawn(self):
        #
        # gh-90549: Check that global named resources in main module
        # will not leak by a subprocess, in spawn context.
        #
        # Run a script that creates a module-global semaphore and spawns
        # a child; the resource tracker must not report a leak on exit.
        testfn = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, testfn)
        with open(testfn, 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent('''\
                import multiprocessing as mp
                ctx = mp.get_context('spawn')
                global_resource = ctx.Semaphore()
                def submain(): pass
                if __name__ == '__main__':
                    p = ctx.Process(target=submain)
                    p.start()
                    p.join()
            '''))
        rc, out, err = script_helper.assert_python_ok(testfn)
        # on error, err = 'UserWarning: resource_tracker: There appear to
        # be 1 leaked semaphore objects to clean up at shutdown'
        self.assertFalse(err, msg=err.decode('utf-8'))
2018-07-11 07:22:28 -03:00
|
|
|
class MiscTestCase(unittest.TestCase):
    """Assorted one-off tests that don't fit the mixin-based test cases."""

    def test__all__(self):
        # Just make sure names in not_exported are excluded
        support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
                             not_exported=['SUBDEBUG', 'SUBWARNING'])

    @only_run_in_spawn_testsuite("avoids redundant testing.")
    def test_spawn_sys_executable_none_allows_import(self):
        # Regression test for a bug introduced in
        # https://github.com/python/cpython/issues/90876 that caused an
        # ImportError in multiprocessing when sys.executable was None.
        # This can be true in embedded environments.
        rc, out, err = script_helper.assert_python_ok(
            "-c",
            """if 1:
            import sys
            sys.executable = None
            assert "multiprocessing" not in sys.modules, "already imported!"
            import multiprocessing
            import multiprocessing.spawn  # This should not fail\n""",
        )
        self.assertEqual(rc, 0)
        self.assertFalse(err, msg=err.decode('utf-8'))

    def test_large_pool(self):
        #
        # gh-89240: Check that large pools are always okay
        #
        # Run a script using a 200-worker pool in a subprocess and check
        # both its computed output and that no warnings/errors appear.
        testfn = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, testfn)
        with open(testfn, 'w', encoding='utf-8') as f:
            f.write(textwrap.dedent('''\
                import multiprocessing
                def f(x): return x*x
                if __name__ == '__main__':
                    with multiprocessing.Pool(200) as p:
                        print(sum(p.map(f, range(1000))))
            '''))
        rc, out, err = script_helper.assert_python_ok(testfn)
        # sum(x*x for x in range(1000)) == 332833500
        self.assertEqual("332833500", out.decode('utf-8').strip())
        self.assertFalse(err, msg=err.decode('utf-8'))
2020-08-17 02:20:40 -03:00
|
|
|
|
2012-08-14 07:41:32 -03:00
|
|
|
#
|
2013-08-14 11:35:41 -03:00
|
|
|
# Mixins
|
2012-08-14 07:41:32 -03:00
|
|
|
#
|
2013-08-14 11:35:41 -03:00
|
|
|
|
2017-07-24 20:55:54 -03:00
|
|
|
class BaseMixin(object):
    """Mixin that records dangling processes/threads around a class run
    and warns about anything the test class leaked."""

    @classmethod
    def setUpClass(cls):
        # Snapshot what is already dangling so tearDownClass reports only
        # leaks introduced by this test class.
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles.  Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        leaked_processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if leaked_processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {leaked_processes}')
        leaked_processes = None

        leaked_threads = set(threading._dangling) - set(cls.dangling[1])
        if leaked_threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {leaked_threads}')
        leaked_threads = None
class ProcessesMixin(BaseMixin):
    # Run the shared test cases against real OS processes: every name
    # maps straight onto the plain multiprocessing API.
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    set_executable = staticmethod(multiprocessing.set_executable)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
2017-07-24 20:55:54 -03:00
|
|
|
class ManagerMixin(BaseMixin):
    # Run the shared test cases against proxies served by a single
    # multiprocessing.Manager() created in setUpClass.  The proxy
    # factories live on the manager instance, so they are exposed as
    # properties that forward to cls.manager.<name>.
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        # Pool needs to be a classmethod (not an attrgetter property)
        # because it must forward arguments to the manager's factory.
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        timeout = WAIT_ACTIVE_CHILDREN_TIMEOUT
        start_time = time.monotonic()
        for _ in support.sleeping_retry(timeout, error=False):
            if len(multiprocessing.active_children()) <= 1:
                break
        else:
            # Retry loop exhausted: warn, but keep shutting down anyway.
            dt = time.monotonic() - start_time
            support.environment_altered = True
            support.print_warning(f"multiprocessing.Manager still has "
                                  f"{multiprocessing.active_children()} "
                                  f"active children after {dt:.1f} seconds")

        gc.collect()                       # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()
2017-07-24 20:55:54 -03:00
|
|
|
class ThreadsMixin(BaseMixin):
    # Run the shared test cases against threads via multiprocessing.dummy,
    # which mirrors the multiprocessing API on top of threading.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
|
|
|
|
# Functions used to create test cases from the base ones in this module
|
2012-08-14 07:41:32 -03:00
|
|
|
#
|
|
|
|
|
2023-08-24 00:35:39 -03:00
|
|
|
def install_tests_in_module_dict(remote_globs, start_method,
                                 only_type=None, exclude_types=False):
    """Generate concrete test classes into a caller module's namespace.

    For every BaseTestCase subclass defined here, create one class per
    allowed backend type ('processes'/'threads'/'manager') by mixing in
    the matching *Mixin, and install it (plus setUpModule/tearDownModule
    fixtures that pin *start_method*) into *remote_globs*.

    only_type restricts generation to one backend; exclude_types skips
    the type-parameterized classes entirely.
    """
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                if only_type and type_ != only_type:
                    continue
                if exclude_types:
                    continue
                # e.g. _TestFoo + 'processes' -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                if type_ == 'manager':
                    # Manager proxies authenticate with HMAC-SHA256.
                    Temp = hashlib_helper.requires_hashdigest('sha256')(Temp)
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            if only_type:
                continue

            # Plain TestCase subclasses are re-exported unchanged (via a
            # trivial subclass) under their original name.
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    # Mutable cells so the nested fixtures below can communicate state.
    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles.  Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)

        multiprocessing.util._cleanup_tests()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
2022-07-11 09:12:36 -03:00
|
|
|
|
|
|
|
|
|
|
|
@unittest.skipIf(not hasattr(_multiprocessing, 'SemLock'), 'SemLock not available')
@unittest.skipIf(sys.platform != "linux", "Linux only")
class SemLockTests(unittest.TestCase):

    def test_semlock_subclass(self):
        # Subclassing the C-level SemLock type must work; instantiating
        # the subclass creates a named semaphore which is unlinked again
        # so it does not leak into the system namespace.
        class SemLock(_multiprocessing.SemLock):
            pass
        name = f'test_semlock_subclass-{os.getpid()}'
        s = SemLock(1, 0, 10, name, False)
        _multiprocessing.sem_unlink(name)