bpo-33718: Update regrtest from master (GH-7325)

* Add support.environment_altered: not used yet
* VSTS: don't run tests with --fail-env-changed

parent f3297433e3
commit 137e80346f
@@ -63,7 +63,7 @@ steps:
 - script: ./venv/bin/python -m test.pythoninfo
   displayName: 'Display build info'

-- script: ./venv/bin/python -m coverage run --pylib -m test --fail-env-changed -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures
+- script: ./venv/bin/python -m coverage run --pylib -m test -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures
   displayName: 'Tests with coverage'

 - script: source ./venv/bin/activate && bash <(curl -s https://codecov.io/bash)
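For context on the pipeline change above: dropping `--fail-env-changed` means an altered test environment no longer fails the coverage job. The sketch below is not regrtest source; it only illustrates, with made-up names, how such a flag typically turns "environment changed" into a distinct exit status (the specific codes are illustrative).

```python
# Hypothetical helper, not the regrtest implementation: map run state onto an
# exit status the way a --fail-env-changed style option usually does.
def exit_code(bad, interrupted, environment_changed, fail_env_changed):
    if bad:
        return 2        # at least one test failed
    if interrupted:
        return 130      # run stopped by Ctrl+C
    if fail_env_changed and environment_changed:
        return 3        # altered environment is fatal only when requested
    return 0            # success


# Without the flag an altered environment is only reported...
print(exit_code(bad=[], interrupted=False,
                environment_changed=["test_x"], fail_env_changed=False))  # 0
# ...with the flag it becomes a non-zero exit.
print(exit_code(bad=[], interrupted=False,
                environment_changed=["test_x"], fail_env_changed=True))   # 3
```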
@@ -96,7 +96,7 @@ resources to test. Currently only the following are defined:

     largefile - It is okay to run some test that may create huge
                 files. These tests can take a long time and may
-                consume >2GB of disk space temporarily.
+                consume >2 GiB of disk space temporarily.

     network -   It is okay to run tests that use external network
                 resource, e.g. testing SSL support for sockets.
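The resource names documented above are passed to regrtest's `-u` option. As a quick illustration (the invocation is assembled here via subprocess and the chosen test name is arbitrary), a run that enables both resources could look like:

```python
# Drive "python -m test" with the largefile and network resources enabled.
# test_socket is just an example of a resource-gated test.
import subprocess
import sys

cmd = [sys.executable, "-m", "test", "-ulargefile,network", "test_socket"]
subprocess.run(cmd, check=False)  # regrtest prints its own result summary
```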
@@ -8,7 +8,6 @@ import re
 import sys
 import sysconfig
 import tempfile
-import textwrap
 import time
 import unittest
 from test.libregrtest.cmdline import _parse_args
@@ -18,6 +17,7 @@ from test.libregrtest.runtest import (
     INTERRUPTED, CHILD_ERROR,
     PROGRESS_MIN_TIME, format_test_result)
 from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import removepy, count, format_duration, printlist
 from test import support
 try:
     import gc
@@ -41,16 +41,6 @@ else:
 TEMPDIR = os.path.abspath(TEMPDIR)


-def format_duration(seconds):
-    if seconds < 1.0:
-        return '%.0f ms' % (seconds * 1e3)
-    if seconds < 60.0:
-        return '%.0f sec' % seconds
-
-    minutes, seconds = divmod(seconds, 60.0)
-    return '%.0f min %.0f sec' % (minutes, seconds)
-
-
 class Regrtest:
     """Execute a test suite.

@@ -133,8 +123,9 @@ class Regrtest:

         # "[ 51/405/1] test_tcl passed"
         line = f"{test_index:{self.test_count_width}}{self.test_count}"
-        if self.bad and not self.ns.pgo:
-            line = f"{line}/{len(self.bad)}"
+        fails = len(self.bad) + len(self.environment_changed)
+        if fails and not self.ns.pgo:
+            line = f"{line}/{fails}"
         line = f"[{line}] {test}"

         # add the system load prefix: "load avg: 1.80 "
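To make the new progress prefix concrete, here is a standalone rendition of the formatting above, with plain function arguments standing in for the Regrtest attributes: environment-changed tests now count toward the failure figure shown between the brackets.

```python
# Illustrative only: rebuild the "[ 51/405/1] test_tcl" style prefix.
def progress_line(test_index, test_count, bad, environment_changed, test_name):
    test_count_width = len(str(test_count))
    line = f"{test_index:{test_count_width}}/{test_count}"
    fails = len(bad) + len(environment_changed)
    if fails:
        line = f"{line}/{fails}"
    return f"[{line}] {test_name}"


print(progress_line(51, 405, bad=["test_foo"], environment_changed=[],
                    test_name="test_tcl"))
# -> "[ 51/405/1] test_tcl"
```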
@@ -281,7 +272,6 @@ class Regrtest:
         self.ns.verbose = True
         self.ns.failfast = False
         self.ns.verbose3 = False
-        self.ns.match_tests = None

         print()
         print("Re-running failed tests in verbose mode")
@@ -312,7 +302,7 @@ class Regrtest:
            return

        print()
-       print("== Tests result ==")
+       print("== Tests result: %s ==" % self.get_tests_result())

        if self.interrupted:
            print()
@@ -323,11 +313,6 @@ class Regrtest:
             print(count(len(omitted), "test"), "omitted:")
             printlist(omitted)

-        if self.rerun:
-            print()
-            print(count(len(self.rerun), "test"), "re-run tests:")
-            printlist(self.rerun)
-
         if self.good and not self.ns.quiet:
             print()
             if (not self.bad
@@ -360,6 +345,11 @@ class Regrtest:
             print(count(len(self.skipped), "test"), "skipped:")
             printlist(self.skipped)

+        if self.rerun:
+            print()
+            print("%s:" % count(len(self.rerun), "re-run test"))
+            printlist(self.rerun)
+
     def run_tests_sequential(self):
         if self.ns.trace:
             import trace
@@ -444,6 +434,21 @@ class Regrtest:
               % (locale.getpreferredencoding(False),
                  sys.getfilesystemencoding()))

+    def get_tests_result(self):
+        result = []
+        if self.bad:
+            result.append("FAILURE")
+        elif self.ns.fail_env_changed and self.environment_changed:
+            result.append("ENV CHANGED")
+
+        if self.interrupted:
+            result.append("INTERRUPTED")
+
+        if not result:
+            result.append("SUCCESS")
+
+        return ', '.join(result)
+
     def run_tests(self):
         # For a partial run, we do not need to clutter the output.
         if (self.ns.header
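A standalone rendition of the new result-string logic, using plain arguments in place of the Regrtest attributes, shows how the pieces combine, including the two-part "FAILURE, INTERRUPTED" case:

```python
# Not the method itself: same decision tree, free-standing for illustration.
def tests_result(bad, environment_changed, interrupted, fail_env_changed):
    result = []
    if bad:
        result.append("FAILURE")
    elif fail_env_changed and environment_changed:
        result.append("ENV CHANGED")
    if interrupted:
        result.append("INTERRUPTED")
    if not result:
        result.append("SUCCESS")
    return ', '.join(result)


print(tests_result([], [], False, False))              # SUCCESS
print(tests_result(["test_os"], [], True, False))      # FAILURE, INTERRUPTED
print(tests_result([], ["test_x"], False, True))       # ENV CHANGED
```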
@@ -485,16 +490,7 @@ class Regrtest:
         print()
         duration = time.monotonic() - self.start_time
         print("Total duration: %s" % format_duration(duration))
-
-        if self.bad:
-            result = "FAILURE"
-        elif self.interrupted:
-            result = "INTERRUPTED"
-        elif self.ns.fail_env_changed and self.environment_changed:
-            result = "ENV CHANGED"
-        else:
-            result = "SUCCESS"
-        print("Tests result: %s" % result)
+        print("Tests result: %s" % self.get_tests_result())

         if self.ns.runleaks:
             os.system("leaks %d" % os.getpid())
@@ -561,37 +557,6 @@ class Regrtest:
         sys.exit(0)


-def removepy(names):
-    if not names:
-        return
-    for idx, name in enumerate(names):
-        basename, ext = os.path.splitext(name)
-        if ext == '.py':
-            names[idx] = basename
-
-
-def count(n, word):
-    if n == 1:
-        return "%d %s" % (n, word)
-    else:
-        return "%d %ss" % (n, word)
-
-
-def printlist(x, width=70, indent=4, file=None):
-    """Print the elements of iterable x to stdout.
-
-    Optional arg width (default 70) is the maximum line length.
-    Optional arg indent (default 4) is the number of blanks with which to
-    begin each line.
-    """
-
-    blanks = ' ' * indent
-    # Print the sorted list: 'x' may be a '--random' list or a set()
-    print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
-                        initial_indent=blanks, subsequent_indent=blanks),
-          file=file)
-
-
 def main(tests=None, **kwargs):
     """Run the Python suite."""
     Regrtest().main(tests=tests, **kwargs)
@@ -7,12 +7,6 @@ from inspect import isabstract
 from test import support


-try:
-    MAXFD = os.sysconf("SC_OPEN_MAX")
-except Exception:
-    MAXFD = 256
-
-
 def dash_R(the_module, test, indirect_test, huntrleaks):
     """Run a test multiple times, looking for reference leaks.

@@ -103,6 +103,9 @@ def runtest(ns, test):
         faulthandler.dump_traceback_later(ns.timeout, exit=True)
     try:
         support.set_match_tests(ns.match_tests)
+        # reset the environment_altered flag to detect if a test altered
+        # the environment
+        support.environment_altered = False
         if ns.failfast:
             support.failfast = True
         if output_on_failure:
@@ -17,6 +17,7 @@ from test.libregrtest.runtest import (
     runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
     format_test_result)
 from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import format_duration


 # Display the running tests if nothing happened last N seconds
@@ -171,7 +172,8 @@ def run_tests_multiprocess(regrtest):
                 continue
             dt = time.monotonic() - worker.start_time
             if dt >= PROGRESS_MIN_TIME:
-                running.append('%s (%.0f sec)' % (current_test, dt))
+                text = '%s (%s)' % (current_test, format_duration(dt))
+                running.append(text)
         return running

     finished = 0
@@ -268,7 +268,13 @@ class saved_test_environment:
     def __exit__(self, exc_type, exc_val, exc_tb):
         saved_values = self.saved_values
         del self.saved_values
-        support.gc_collect()  # Some resources use weak references
+
+        # Some resources use weak references
+        support.gc_collect()
+
+        # Read support.environment_altered, set by support helper functions
+        self.changed |= support.environment_altered
+
         for name, get, restore in self.resource_info():
             current = get()
             original = saved_values.pop(name)
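Putting the two halves together: runtest() resets support.environment_altered before each test, support helpers set it when cleanup fails, and saved_test_environment.__exit__ folds it into the "environment changed" verdict. The sketch below uses a stand-in object rather than the real test.support module, purely to show the flow:

```python
# Minimal sketch with fake objects; names mirror the diff but nothing here
# touches the real test.support machinery.
class FakeSupport:
    environment_altered = False


support = FakeSupport()


def threading_cleanup_failed():
    # Helpers flag the problem instead of failing the test outright.
    support.environment_altered = True


def run_one_test(test_func):
    support.environment_altered = False      # reset before the test (runtest())
    changed = False
    test_func()
    changed |= support.environment_altered   # read back in __exit__()
    return changed


print(run_one_test(lambda: None))             # False: nothing altered
print(run_one_test(threading_cleanup_failed)) # True: helper reported leftovers
```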
@@ -0,0 +1,47 @@
+import os.path
+import textwrap
+
+
+def format_duration(seconds):
+    if seconds < 1.0:
+        return '%.0f ms' % (seconds * 1e3)
+    if seconds < 60.0:
+        return '%.0f sec' % seconds
+
+    minutes, seconds = divmod(seconds, 60.0)
+    hours, minutes = divmod(minutes, 60.0)
+    if hours:
+        return '%.0f hour %.0f min' % (hours, minutes)
+    else:
+        return '%.0f min %.0f sec' % (minutes, seconds)
+
+
+def removepy(names):
+    if not names:
+        return
+    for idx, name in enumerate(names):
+        basename, ext = os.path.splitext(name)
+        if ext == '.py':
+            names[idx] = basename
+
+
+def count(n, word):
+    if n == 1:
+        return "%d %s" % (n, word)
+    else:
+        return "%d %ss" % (n, word)
+
+
+def printlist(x, width=70, indent=4, file=None):
+    """Print the elements of iterable x to stdout.
+
+    Optional arg width (default 70) is the maximum line length.
+    Optional arg indent (default 4) is the number of blanks with which to
+    begin each line.
+    """
+
+    blanks = ' ' * indent
+    # Print the sorted list: 'x' may be a '--random' list or a set()
+    print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
+                        initial_indent=blanks, subsequent_indent=blanks),
+          file=file)
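The helpers in this new file mostly move code out of main.py, with format_duration gaining an hours case. A quick check of what they return (import path as used elsewhere in this diff; run against a Python that contains this change):

```python
from test.libregrtest.utils import format_duration, count, removepy

print(format_duration(0.25))       # '250 ms'
print(format_duration(75))         # '1 min 15 sec'
print(format_duration(4000))       # '1 hour 6 min'   (new hours branch)
print(count(1, "test"))            # '1 test'
print(count(3, "re-run test"))     # '3 re-run tests'

names = ["test_os.py", "test_sys"]
removepy(names)
print(names)                       # ['test_os', 'test_sys']
```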
@@ -2069,6 +2069,14 @@ def modules_cleanup(oldmodules):
 #=======================================================================
 # Threading support to prevent reporting refleaks when running regrtest.py -R

+# Flag used by saved_test_environment of test.libregrtest.save_env,
+# to check if a test modified the environment. The flag should be set to False
+# before running a new test.
+#
+# For example, threading_cleanup() sets the flag if the function fails
+# to cleanup threads.
+environment_altered = False
+
 # NOTE: we use thread._count() rather than threading.enumerate() (or the
 # moral equivalent thereof) because a threading.Thread object is still alive
 # until its __bootstrap() method has returned, even after it has been
@@ -593,6 +593,8 @@ class ProgramsTestCase(BaseTestCase):
     def test_pcbuild_rt(self):
         # PCbuild\rt.bat
         script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+        if not os.path.isfile(script):
+            self.skipTest(f'File "{script}" does not exist')
         rt_args = ["-q"]    # Quick, don't run tests twice
         if platform.architecture()[0] == '64bit':
             rt_args.append('-x64')   # 64-bit build
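The guard added above is the standard unittest skip pattern for files that exist only in some checkouts. A generic, self-contained version (the file name is made up) looks like this:

```python
# Sketch of the same skip pattern outside the CPython test suite.
import os.path
import unittest


class ToolScriptTests(unittest.TestCase):
    def test_helper_script(self):
        # "helper.bat" is a placeholder; skip cleanly when it is absent.
        script = os.path.join(os.path.dirname(__file__), "helper.bat")
        if not os.path.isfile(script):
            self.skipTest(f'File "{script}" does not exist')
        self.assertTrue(os.path.isfile(script))


if __name__ == "__main__":
    unittest.main()
```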