bpo-38614: Use test.support.SHORT_TIMEOUT constant (GH-17566)

Replace hardcoded timeout constants in tests with SHORT_TIMEOUT of
test.support, so it's easier to adjust this timeout for all tests at
once.

SHORT_TIMEOUT is 30 seconds by default, but it can be longer
depending on the --timeout command line option.

The change makes almost all timeouts longer, except
test_reap_children() of test_support, which is made 2x shorter:
SHORT_TIMEOUT should be enough. If this test starts to fail,
LONG_TIMEOUT should be used instead.

Also uniformize the "from test import support" import in some test files.
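
For reference, the pattern applied throughout the diff below is the deadline-based
polling loop these tests already use; a minimal sketch follows (the wait_for()
helper and condition() callable are illustrative only, not part of the change):

    # Illustrative sketch only: wait_for() and condition() are hypothetical
    # helpers. The point is that the deadline is derived from
    # test.support.SHORT_TIMEOUT (30 seconds by default, longer when regrtest
    # is run with --timeout) instead of a hardcoded number of seconds.
    import time
    from test import support

    def wait_for(condition):
        deadline = time.monotonic() + support.SHORT_TIMEOUT
        while not condition():
            if time.monotonic() >= deadline:
                raise AssertionError("timed out waiting for condition")
            time.sleep(0.1)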
Victor Stinner 2019-12-11 11:30:03 +01:00 committed by GitHub
parent b7a0109cd2
commit 0d63bacefd
19 changed files with 73 additions and 65 deletions

View File

@@ -318,7 +318,7 @@ class _TestProcess(BaseTestCase):
     def _test_report_parent_status(cls, wconn):
         from multiprocessing.process import parent_process
         wconn.send("alive" if parent_process().is_alive() else "not alive")
-        parent_process().join(timeout=5)
+        parent_process().join(timeout=support.SHORT_TIMEOUT)
         wconn.send("alive" if parent_process().is_alive() else "not alive")
     def test_process(self):

View File

@@ -11,7 +11,7 @@ active threads survive in the child after a fork(); this is an error.
 import os, sys, time, unittest
 import threading
-import test.support as support
+from test import support
 LONGSLEEP = 2

@@ -62,7 +62,7 @@ class ForkWait(unittest.TestCase):
         self.threads.append(thread)
         # busy-loop to wait for threads
-        deadline = time.monotonic() + 10.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while len(self.alive) < NUM_THREADS:
             time.sleep(0.1)
             if deadline < time.monotonic():

View File

@@ -4,6 +4,7 @@ import subprocess
 import sys
 import time
 import unittest
+from test import support
 class SIGUSR1Exception(Exception):

@@ -27,7 +28,7 @@ class InterProcessSignalTests(unittest.TestCase):
             # (if set)
             child.wait()
-        timeout = 10.0
+        timeout = support.SHORT_TIMEOUT
         deadline = time.monotonic() + timeout
         while time.monotonic() < deadline:

View File

@@ -1475,12 +1475,12 @@ class EventLoopTestsMixin:
             return len(data)
         test_utils.run_until(self.loop, lambda: reader(data) >= 1,
-                             timeout=10)
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(b'1', data)
         transport.write(b'2345')
         test_utils.run_until(self.loop, lambda: reader(data) >= 5,
-                             timeout=10)
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(b'12345', data)
         self.assertEqual('CONNECTED', proto.state)

@@ -1531,27 +1531,29 @@ class EventLoopTestsMixin:
             return len(data)
         write_transport.write(b'1')
-        test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
+        test_utils.run_until(self.loop, lambda: reader(data) >= 1,
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(b'1', data)
         self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
         self.assertEqual('CONNECTED', write_proto.state)
         os.write(master, b'a')
         test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
-                             timeout=10)
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
         self.assertEqual(1, read_proto.nbytes)
         self.assertEqual('CONNECTED', write_proto.state)
         write_transport.write(b'2345')
-        test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
+        test_utils.run_until(self.loop, lambda: reader(data) >= 5,
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(b'12345', data)
         self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
         self.assertEqual('CONNECTED', write_proto.state)
         os.write(master, b'bcde')
         test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
-                             timeout=10)
+                             timeout=support.SHORT_TIMEOUT)
         self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
         self.assertEqual(5, read_proto.nbytes)
         self.assertEqual('CONNECTED', write_proto.state)

View File

@@ -274,7 +274,8 @@ class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
         with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
             self.loop.run_until_complete(
-                asyncio.wait_for(client(srv.addr), timeout=10))
+                asyncio.wait_for(client(srv.addr),
+                                 timeout=support.SHORT_TIMEOUT))
         # No garbage is left if SSL is closed uncleanly
         client_context = weakref.ref(client_context)

@@ -335,7 +336,8 @@ class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
         with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
             self.loop.run_until_complete(
-                asyncio.wait_for(client(srv.addr), timeout=10))
+                asyncio.wait_for(client(srv.addr),
+                                 timeout=support.SHORT_TIMEOUT))
         # No garbage is left for SSL client from loop.create_connection, even
         # if user stores the SSLTransport in corresponding protocol instance

@@ -491,7 +493,8 @@ class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
         with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
             self.loop.run_until_complete(
-                asyncio.wait_for(client(srv.addr), timeout=10))
+                asyncio.wait_for(client(srv.addr),
+                                 timeout=support.SHORT_TIMEOUT))
     def test_start_tls_server_1(self):
         HELLO_MSG = b'1' * self.PAYLOAD_SIZE

@@ -619,7 +622,7 @@ class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
                     *addr,
                     ssl=client_sslctx,
                     server_hostname='',
-                    ssl_handshake_timeout=10.0),
+                    ssl_handshake_timeout=support.SHORT_TIMEOUT),
                 0.5)
         with self.tcp_server(server,

View File

@@ -107,7 +107,7 @@ def run_briefly(loop):
         gen.close()
-def run_until(loop, pred, timeout=30):
+def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
     deadline = time.monotonic() + timeout
     while not pred():
         if timeout is not None:

View File

@@ -1,9 +1,9 @@
-import test.support
+from test import support
 # Skip tests if _multiprocessing wasn't built.
-test.support.import_module('_multiprocessing')
+support.import_module('_multiprocessing')
 # Skip tests if sem_open implementation is broken.
-test.support.import_module('multiprocessing.synchronize')
+support.import_module('multiprocessing.synchronize')
 from test.support.script_helper import assert_python_ok

@@ -101,11 +101,11 @@ def make_dummy_object(_):
 class BaseTestCase(unittest.TestCase):
     def setUp(self):
-        self._thread_key = test.support.threading_setup()
+        self._thread_key = support.threading_setup()
     def tearDown(self):
-        test.support.reap_children()
-        test.support.threading_cleanup(*self._thread_key)
+        support.reap_children()
+        support.threading_cleanup(*self._thread_key)
 class ExecutorMixin:

@@ -132,7 +132,7 @@ class ExecutorMixin:
         self.executor = None
         dt = time.monotonic() - self.t1
-        if test.support.verbose:
+        if support.verbose:
             print("%.2fs" % dt, end=' ')
         self.assertLess(dt, 300, "synchronization issue: test lasted too long")

@@ -712,7 +712,7 @@ class ExecutorTest:
         self.executor.map(str, [2] * (self.worker_count + 1))
         self.executor.shutdown()
-    @test.support.cpython_only
+    @support.cpython_only
     def test_no_stale_references(self):
         # Issue #16284: check that the executors don't unnecessarily hang onto
         # references.

@@ -724,7 +724,7 @@ class ExecutorTest:
         self.executor.submit(my_object.my_method)
         del my_object
-        collected = my_object_collected.wait(timeout=5.0)
+        collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT)
         self.assertTrue(collected,
                         "Stale reference not collected within timeout.")

@@ -836,7 +836,7 @@ class ProcessPoolExecutorTest(ExecutorTest):
         self.assertIs(type(cause), futures.process._RemoteTraceback)
         self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
-        with test.support.captured_stderr() as f1:
+        with support.captured_stderr() as f1:
             try:
                 raise exc
             except RuntimeError:

@@ -929,7 +929,7 @@ class ErrorAtUnpickle(object):
 class ExecutorDeadlockTest:
-    TIMEOUT = 15
+    TIMEOUT = support.SHORT_TIMEOUT
     @classmethod
     def _sleep_id(cls, x, delay):

@@ -994,7 +994,7 @@ class ExecutorDeadlockTest:
         for func, args, error, name in crash_cases:
             with self.subTest(name):
                 # The captured_stderr reduces the noise in the test report
-                with test.support.captured_stderr():
+                with support.captured_stderr():
                     executor = self.executor_type(
                         max_workers=2, mp_context=get_context(self.ctx))
                     res = executor.submit(func, *args)

@@ -1061,7 +1061,7 @@ class FutureTests(BaseTestCase):
         self.assertTrue(was_cancelled)
     def test_done_callback_raises(self):
-        with test.support.captured_stderr() as stderr:
+        with support.captured_stderr() as stderr:
             raising_was_called = False
             fn_was_called = False

@@ -1116,7 +1116,7 @@ class FutureTests(BaseTestCase):
         self.assertTrue(was_cancelled)
     def test_done_callback_raises_already_succeeded(self):
-        with test.support.captured_stderr() as stderr:
+        with support.captured_stderr() as stderr:
             def raising_fn(callback_future):
                 raise Exception('doh!')

@@ -1235,7 +1235,8 @@ class FutureTests(BaseTestCase):
         t = threading.Thread(target=notification)
         t.start()
-        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
+        self.assertRaises(futures.CancelledError,
+                          f1.result, timeout=support.SHORT_TIMEOUT)
         t.join()
     def test_exception_with_timeout(self):

@@ -1264,7 +1265,7 @@ class FutureTests(BaseTestCase):
         t = threading.Thread(target=notification)
         t.start()
-        self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
+        self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT), OSError))
         t.join()
     def test_multiple_set_result(self):

@@ -1300,12 +1301,12 @@ _threads_key = None
 def setUpModule():
     global _threads_key
-    _threads_key = test.support.threading_setup()
+    _threads_key = support.threading_setup()
 def tearDownModule():
-    test.support.threading_cleanup(*_threads_key)
-    test.support.reap_children()
+    support.threading_cleanup(*_threads_key)
+    support.reap_children()
     # cleanup multiprocessing
     multiprocessing.process._cleanup()

@@ -1315,7 +1316,7 @@ def tearDownModule():
     # bpo-37421: Explicitly call _run_finalizers() to remove immediately
    # temporary directories created by multiprocessing.util.get_temp_dir().
     multiprocessing.util._run_finalizers()
-    test.support.gc_collect()
+    support.gc_collect()
 if __name__ == "__main__":

View File

@@ -10,15 +10,15 @@ import time
 import unittest
 from test.fork_wait import ForkWait
-from test.support import reap_children, get_attribute, verbose
+from test import support
 # Skip test if fork does not exist.
-get_attribute(os, 'fork')
+support.get_attribute(os, 'fork')
 class ForkTest(ForkWait):
     def wait_impl(self, cpid):
-        deadline = time.monotonic() + 10.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while time.monotonic() <= deadline:
             # waitpid() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests. This is an attempt to fix the problem.

@@ -56,7 +56,7 @@ class ForkTest(ForkWait):
                 if m == complete_module:
                     os._exit(0)
                 else:
-                    if verbose > 1:
+                    if support.verbose > 1:
                         print("Child encountered partial module")
                     os._exit(1)
             else:

@@ -90,7 +90,7 @@ class ForkTest(ForkWait):
                 imp.release_lock()
             except RuntimeError:
                 if in_child:
-                    if verbose > 1:
+                    if support.verbose > 1:
                         print("RuntimeError in child")
                     os._exit(1)
                 raise

@@ -105,7 +105,7 @@ class ForkTest(ForkWait):
 def tearDownModule():
-    reap_children()
+    support.reap_children()
 if __name__ == "__main__":
     unittest.main()

View File

@@ -1336,7 +1336,7 @@ class PydocServerTest(unittest.TestCase):
         self.assertIn('0.0.0.0', serverthread.docserver.address)
         starttime = time.monotonic()
-        timeout = 1 #seconds
+        timeout = test.support.SHORT_TIMEOUT
         while serverthread.serving:
             time.sleep(.01)

View File

@@ -6,7 +6,7 @@ import unittest
 from test import support
-TIMEOUT = 10
+TIMEOUT = support.SHORT_TIMEOUT
 class Timer:

View File

@@ -644,7 +644,7 @@ class SiginterruptTest(unittest.TestCase):
             # wait until the child process is loaded and has started
             first_line = process.stdout.readline()
-            stdout, stderr = process.communicate(timeout=5.0)
+            stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT)
         except subprocess.TimeoutExpired:
             process.kill()
             return False

@@ -1192,7 +1192,7 @@ class StressTest(unittest.TestCase):
         self.setsig(signal.SIGALRM, second_handler)  # for ITIMER_REAL
         expected_sigs = 0
-        deadline = time.monotonic() + 15.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while expected_sigs < N:
             os.kill(os.getpid(), signal.SIGPROF)

@@ -1226,7 +1226,7 @@ class StressTest(unittest.TestCase):
         self.setsig(signal.SIGALRM, handler)  # for ITIMER_REAL
         expected_sigs = 0
-        deadline = time.monotonic() + 15.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while expected_sigs < N:
             # Hopefully the SIGALRM will be received somewhere during

View File

@@ -36,7 +36,7 @@ def signal_alarm(n):
 # Remember real select() to avoid interferences with mocking
 _real_select = select.select
-def receive(sock, n, timeout=20):
+def receive(sock, n, timeout=test.support.SHORT_TIMEOUT):
     r, w, x = _real_select([sock], [], [], timeout)
     if sock in r:
         return sock.recv(n)

View File

@@ -2156,7 +2156,7 @@ class SimpleBackgroundTests(unittest.TestCase):
     def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
         # A simple IO loop. Call func(*args) depending on the error we get
         # (WANT_READ or WANT_WRITE) move data between the socket and the BIOs.
-        timeout = kwargs.get('timeout', 10)
+        timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
         deadline = time.monotonic() + timeout
         count = 0
         while True:

View File

@@ -1121,9 +1121,7 @@ class ProcessTestCase(BaseTestCase):
         with self.assertRaises(subprocess.TimeoutExpired) as c:
             p.wait(timeout=0.0001)
         self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
-        # Some heavily loaded buildbots (sparc Debian 3.x) require this much
-        # time to start.
-        self.assertEqual(p.wait(timeout=3), 0)
+        self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)

@@ -1289,7 +1287,7 @@ class ProcessTestCase(BaseTestCase):
         # Wait for the process to finish; the thread should kill it
         # long before it finishes on its own. Supplying a timeout
         # triggers a different code path for better coverage.
-        proc.wait(timeout=20)
+        proc.wait(timeout=support.SHORT_TIMEOUT)
         self.assertEqual(proc.returncode, expected_errorcode,
                          msg="unexpected result in wait from main thread")

View File

@@ -422,7 +422,7 @@ class TestSupport(unittest.TestCase):
             os._exit(0)
         t0 = time.monotonic()
-        deadline = time.monotonic() + 60.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         was_altered = support.environment_altered
         try:

View File

@@ -265,7 +265,7 @@ class ThreadTests(BaseTestCase):
         self.assertEqual(result, 1) # one thread state modified
         if verbose:
             print(" waiting for worker to say it caught the exception")
-        worker_saw_exception.wait(timeout=10)
+        worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
         self.assertTrue(t.finished)
         if verbose:
             print(" all OK -- joining worker")

@@ -640,7 +640,7 @@ class ThreadTests(BaseTestCase):
         finish.release()
         # When the thread ends, the state_lock can be successfully
         # acquired.
-        self.assertTrue(tstate_lock.acquire(timeout=5), False)
+        self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
         # But is_alive() is still True: we hold _tstate_lock now, which
         # prevents is_alive() from knowing the thread's end-of-life C code
         # is done.

View File

@@ -7,7 +7,7 @@ import sys
 import time
 import unittest
 from test.fork_wait import ForkWait
-from test.support import reap_children
+from test import support
 if not hasattr(os, 'fork'):
     raise unittest.SkipTest("os.fork not defined")

@@ -20,7 +20,7 @@ class Wait3Test(ForkWait):
         # This many iterations can be required, since some previously run
         # tests (e.g. test_ctypes) could have spawned a lot of children
         # very quickly.
-        deadline = time.monotonic() + 10.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while time.monotonic() <= deadline:
             # wait3() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests. This is an attempt to fix the problem.

@@ -50,7 +50,7 @@ class Wait3Test(ForkWait):
 def tearDownModule():
-    reap_children()
+    support.reap_children()
 if __name__ == "__main__":
     unittest.main()

View File

@@ -6,11 +6,11 @@ import time
 import sys
 import unittest
 from test.fork_wait import ForkWait
-from test.support import reap_children, get_attribute
+from test import support
 # If either of these do not exist, skip this test.
-get_attribute(os, 'fork')
-get_attribute(os, 'wait4')
+support.get_attribute(os, 'fork')
+support.get_attribute(os, 'wait4')
 class Wait4Test(ForkWait):

@@ -20,7 +20,7 @@ class Wait4Test(ForkWait):
         # Issue #11185: wait4 is broken on AIX and will always return 0
         # with WNOHANG.
         option = 0
-        deadline = time.monotonic() + 10.0
+        deadline = time.monotonic() + support.SHORT_TIMEOUT
         while time.monotonic() <= deadline:
             # wait4() shouldn't hang, but some of the buildbots seem to hang
             # in the forking tests. This is an attempt to fix the problem.

@@ -33,7 +33,7 @@ class Wait4Test(ForkWait):
         self.assertTrue(rusage)
 def tearDownModule():
-    reap_children()
+    support.reap_children()
 if __name__ == "__main__":
     unittest.main()

View File

@@ -1,3 +1,6 @@
 Replace hardcoded timeout constants in tests with
-:data:`~test.support.LOOPBACK_TIMEOUT` of :mod:`test.support`, so it's easier
-to ajdust this timeout for all tests at once.
+new :mod:`test.support` constants: :data:`~test.support.LOOPBACK_TIMEOUT`,
+:data:`~test.support.INTERNET_TIMEOUT`, :data:`~test.support.SHORT_TIMEOUT` and
+:data:`~test.support.LONG_TIMEOUT`. It becomes easier to adjust these four
+timeout constants for all tests at once, rather than having to adjust every
+single test file.
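
As a quick orientation to the four constants named in the NEWS entry above, a
hedged summary follows; the one-line descriptions are inferred from the constant
names and from how this commit uses SHORT_TIMEOUT and LONG_TIMEOUT, so check the
test.support module itself for the authoritative values and documentation:

    # Inferred summary, not quoted from the diff:
    from test import support

    support.LOOPBACK_TIMEOUT   # network operations against the local host
    support.INTERNET_TIMEOUT   # network operations against remote hosts
    support.SHORT_TIMEOUT      # operations expected to finish quickly (30s default)
    support.LONG_TIMEOUT       # known-slow operations, or when SHORT_TIMEOUT proves too short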