bpo-33718: Update regrtest from master (GH-7325)
* Add support.environment_altered: unused yet
* VSTS: don't run tests with --fail-env-changed
parent f3297433e3
commit 137e80346f
@@ -63,7 +63,7 @@ steps:
 - script: ./venv/bin/python -m test.pythoninfo
   displayName: 'Display build info'
 
-- script: ./venv/bin/python -m coverage run --pylib -m test --fail-env-changed -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures
+- script: ./venv/bin/python -m coverage run --pylib -m test -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures
   displayName: 'Tests with coverage'
 
 - script: source ./venv/bin/activate && bash <(curl -s https://codecov.io/bash)
@@ -96,7 +96,7 @@ resources to test. Currently only the following are defined:
     largefile - It is okay to run some test that may create huge
                 files.  These tests can take a long time and may
-                consume >2GB of disk space temporarily.
+                consume >2 GiB of disk space temporarily.
 
     network -   It is okay to run tests that use external network
                 resource, e.g. testing SSL support for sockets.
@@ -8,7 +8,6 @@ import re
 import sys
 import sysconfig
 import tempfile
-import textwrap
 import time
 import unittest
 from test.libregrtest.cmdline import _parse_args
@@ -18,6 +17,7 @@ from test.libregrtest.runtest import (
     INTERRUPTED, CHILD_ERROR,
     PROGRESS_MIN_TIME, format_test_result)
 from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import removepy, count, format_duration, printlist
 from test import support
 try:
     import gc
@@ -41,16 +41,6 @@ else:
 TEMPDIR = os.path.abspath(TEMPDIR)
 
 
-def format_duration(seconds):
-    if seconds < 1.0:
-        return '%.0f ms' % (seconds * 1e3)
-    if seconds < 60.0:
-        return '%.0f sec' % seconds
-
-    minutes, seconds = divmod(seconds, 60.0)
-    return '%.0f min %.0f sec' % (minutes, seconds)
-
-
 class Regrtest:
     """Execute a test suite.
 
@@ -133,8 +123,9 @@ class Regrtest:
 
         # "[ 51/405/1] test_tcl passed"
         line = f"{test_index:{self.test_count_width}}{self.test_count}"
-        if self.bad and not self.ns.pgo:
-            line = f"{line}/{len(self.bad)}"
+        fails = len(self.bad) + len(self.environment_changed)
+        if fails and not self.ns.pgo:
+            line = f"{line}/{fails}"
         line = f"[{line}] {test}"
 
         # add the system load prefix: "load avg: 1.80 "
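
Note: a minimal standalone sketch of the new progress-line format, for illustration only. The function below is hypothetical and simply mirrors the attributes used in the hunk above; it is not part of the diff.

# Hypothetical sketch: the failure counter in the progress line now also
# counts tests that only altered the environment.
def format_progress(test_index, test_count, bad, environment_changed, test, pgo=False):
    width = len(str(test_count))
    line = f"{test_index:{width}}/{test_count}"
    fails = len(bad) + len(environment_changed)
    if fails and not pgo:
        line = f"{line}/{fails}"
    return f"[{line}] {test}"

print(format_progress(51, 405, ["test_tcl"], [], "test_os"))
# -> '[ 51/405/1] test_os'
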
@@ -281,7 +272,6 @@ class Regrtest:
         self.ns.verbose = True
         self.ns.failfast = False
         self.ns.verbose3 = False
-        self.ns.match_tests = None
 
         print()
         print("Re-running failed tests in verbose mode")
@@ -312,7 +302,7 @@ class Regrtest:
             return
 
         print()
-        print("== Tests result ==")
+        print("== Tests result: %s ==" % self.get_tests_result())
 
         if self.interrupted:
             print()
@@ -323,11 +313,6 @@ class Regrtest:
             print(count(len(omitted), "test"), "omitted:")
             printlist(omitted)
 
-        if self.rerun:
-            print()
-            print(count(len(self.rerun), "test"), "re-run tests:")
-            printlist(self.rerun)
-
         if self.good and not self.ns.quiet:
             print()
             if (not self.bad
@@ -360,6 +345,11 @@ class Regrtest:
             print(count(len(self.skipped), "test"), "skipped:")
             printlist(self.skipped)
 
+        if self.rerun:
+            print()
+            print("%s:" % count(len(self.rerun), "re-run test"))
+            printlist(self.rerun)
+
     def run_tests_sequential(self):
         if self.ns.trace:
             import trace
@@ -444,6 +434,21 @@ class Regrtest:
               % (locale.getpreferredencoding(False),
                  sys.getfilesystemencoding()))
 
+    def get_tests_result(self):
+        result = []
+        if self.bad:
+            result.append("FAILURE")
+        elif self.ns.fail_env_changed and self.environment_changed:
+            result.append("ENV CHANGED")
+
+        if self.interrupted:
+            result.append("INTERRUPTED")
+
+        if not result:
+            result.append("SUCCESS")
+
+        return ', '.join(result)
+
     def run_tests(self):
         # For a partial run, we do not need to clutter the output.
         if (self.ns.header
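
Note: the strings produced by the new get_tests_result() can be sketched with a small stand-alone function. The helper below is hypothetical and only illustrates how the parts combine.

# Hypothetical stand-in for Regrtest.get_tests_result() above.
def tests_result(bad, environment_changed, interrupted, fail_env_changed):
    result = []
    if bad:
        result.append("FAILURE")
    elif fail_env_changed and environment_changed:
        result.append("ENV CHANGED")
    if interrupted:
        result.append("INTERRUPTED")
    if not result:
        result.append("SUCCESS")
    return ', '.join(result)

print(tests_result([], ["test_x"], True, True))   # -> 'ENV CHANGED, INTERRUPTED'
print(tests_result([], [], False, True))          # -> 'SUCCESS'
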
@@ -485,16 +490,7 @@ class Regrtest:
         print()
         duration = time.monotonic() - self.start_time
         print("Total duration: %s" % format_duration(duration))
-
-        if self.bad:
-            result = "FAILURE"
-        elif self.interrupted:
-            result = "INTERRUPTED"
-        elif self.ns.fail_env_changed and self.environment_changed:
-            result = "ENV CHANGED"
-        else:
-            result = "SUCCESS"
-        print("Tests result: %s" % result)
+        print("Tests result: %s" % self.get_tests_result())
 
         if self.ns.runleaks:
             os.system("leaks %d" % os.getpid())
@@ -561,37 +557,6 @@ class Regrtest:
         sys.exit(0)
 
 
-def removepy(names):
-    if not names:
-        return
-    for idx, name in enumerate(names):
-        basename, ext = os.path.splitext(name)
-        if ext == '.py':
-            names[idx] = basename
-
-
-def count(n, word):
-    if n == 1:
-        return "%d %s" % (n, word)
-    else:
-        return "%d %ss" % (n, word)
-
-
-def printlist(x, width=70, indent=4, file=None):
-    """Print the elements of iterable x to stdout.
-
-    Optional arg width (default 70) is the maximum line length.
-    Optional arg indent (default 4) is the number of blanks with which to
-    begin each line.
-    """
-
-    blanks = ' ' * indent
-    # Print the sorted list: 'x' may be a '--random' list or a set()
-    print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
-                        initial_indent=blanks, subsequent_indent=blanks),
-          file=file)
-
-
 def main(tests=None, **kwargs):
     """Run the Python suite."""
     Regrtest().main(tests=tests, **kwargs)
@@ -7,12 +7,6 @@ from inspect import isabstract
 from test import support
 
 
-try:
-    MAXFD = os.sysconf("SC_OPEN_MAX")
-except Exception:
-    MAXFD = 256
-
-
 def dash_R(the_module, test, indirect_test, huntrleaks):
     """Run a test multiple times, looking for reference leaks.
 
@@ -103,6 +103,9 @@ def runtest(ns, test):
         faulthandler.dump_traceback_later(ns.timeout, exit=True)
     try:
         support.set_match_tests(ns.match_tests)
+        # reset the environment_altered flag to detect if a test altered
+        # the environment
+        support.environment_altered = False
         if ns.failfast:
             support.failfast = True
         if output_on_failure:
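
Note: the reset-then-check protocol around support.environment_altered can be summarized as follows. This is a simplified illustration, not the actual runtest() code, and it assumes the flag exists, which is only true after this commit.

from test import support

# Simplified sketch of the protocol: clear the flag before a test runs,
# then treat a True value afterwards as "a helper reported a change".
support.environment_altered = False
# ... run one test here ...
if support.environment_altered:
    print("a support helper reported that the environment was altered")
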
@@ -17,6 +17,7 @@ from test.libregrtest.runtest import (
     runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
     format_test_result)
 from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import format_duration
 
 
 # Display the running tests if nothing happened last N seconds
@@ -171,7 +172,8 @@ def run_tests_multiprocess(regrtest):
                 continue
             dt = time.monotonic() - worker.start_time
             if dt >= PROGRESS_MIN_TIME:
-                running.append('%s (%.0f sec)' % (current_test, dt))
+                text = '%s (%s)' % (current_test, format_duration(dt))
+                running.append(text)
         return running
 
     finished = 0
@@ -268,7 +268,13 @@ class saved_test_environment:
     def __exit__(self, exc_type, exc_val, exc_tb):
         saved_values = self.saved_values
         del self.saved_values
-        support.gc_collect()  # Some resources use weak references
+
+        # Some resources use weak references
+        support.gc_collect()
+
+        # Read support.environment_altered, set by support helper functions
+        self.changed |= support.environment_altered
+
         for name, get, restore in self.resource_info():
             current = get()
             original = saved_values.pop(name)
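
Note: the pattern used in __exit__ above, OR-ing a module-level "altered" flag into the context manager's own change detection, can be sketched roughly like this. The class and names below are made up for illustration and are not the real saved_test_environment.

# Hypothetical context manager combining value comparison with an external flag.
class WatchValue:
    def __init__(self, get_value, altered_flag):
        self.get_value = get_value        # callable returning the watched value
        self.altered_flag = altered_flag  # callable returning an external flag
        self.changed = False

    def __enter__(self):
        self.saved = self.get_value()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # A helper may have flagged a change even if the watched value is equal.
        self.changed |= self.altered_flag()
        if self.get_value() != self.saved:
            self.changed = True
        return False
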
@@ -0,0 +1,47 @@
+import os.path
+import textwrap
+
+
+def format_duration(seconds):
+    if seconds < 1.0:
+        return '%.0f ms' % (seconds * 1e3)
+    if seconds < 60.0:
+        return '%.0f sec' % seconds
+
+    minutes, seconds = divmod(seconds, 60.0)
+    hours, minutes = divmod(minutes, 60.0)
+    if hours:
+        return '%.0f hour %.0f min' % (hours, minutes)
+    else:
+        return '%.0f min %.0f sec' % (minutes, seconds)
+
+
+def removepy(names):
+    if not names:
+        return
+    for idx, name in enumerate(names):
+        basename, ext = os.path.splitext(name)
+        if ext == '.py':
+            names[idx] = basename
+
+
+def count(n, word):
+    if n == 1:
+        return "%d %s" % (n, word)
+    else:
+        return "%d %ss" % (n, word)
+
+
+def printlist(x, width=70, indent=4, file=None):
+    """Print the elements of iterable x to stdout.
+
+    Optional arg width (default 70) is the maximum line length.
+    Optional arg indent (default 4) is the number of blanks with which to
+    begin each line.
+    """
+
+    blanks = ' ' * indent
+    # Print the sorted list: 'x' may be a '--random' list or a set()
+    print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
+                        initial_indent=blanks, subsequent_indent=blanks),
+          file=file)
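
Note: a quick usage sketch of the helpers added in this new module (it assumes the CPython source tree is importable so that the test package can be loaded; expected output is shown in comments).

# Usage sketch; format_duration(), count() and removepy() are defined above.
from test.libregrtest.utils import format_duration, count, removepy

print(format_duration(0.25))     # -> '250 ms'
print(format_duration(105.0))    # -> '1 min 45 sec'
print(format_duration(4000.0))   # -> '1 hour 6 min'
print(count(1, "re-run test"))   # -> '1 re-run test'
print(count(3, "re-run test"))   # -> '3 re-run tests'

names = ['test_os.py', 'test_sys']
removepy(names)                  # strips the '.py' suffix in place
print(names)                     # -> ['test_os', 'test_sys']
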
@@ -2069,6 +2069,14 @@ def modules_cleanup(oldmodules):
 #=======================================================================
 # Threading support to prevent reporting refleaks when running regrtest.py -R
 
+# Flag used by saved_test_environment of test.libregrtest.save_env,
+# to check if a test modified the environment. The flag should be set to False
+# before running a new test.
+#
+# For example, threading_cleanup() sets the flag if the function fails
+# to clean up threads.
+environment_altered = False
+
 # NOTE: we use thread._count() rather than threading.enumerate() (or the
 # moral equivalent thereof) because a threading.Thread object is still alive
 # until its __bootstrap() method has returned, even after it has been
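
Note: to illustrate the comment above, a support helper could report a leaked resource through the new flag roughly as sketched below. This is a hedged example; the helper name is hypothetical and the real threading_cleanup() changes are not part of this hunk.

import sys
from test import support

def example_cleanup(leaked_items):
    # Hypothetical helper: instead of failing the current test, record that
    # the environment was altered so regrtest can report it afterwards.
    if leaked_items:
        support.environment_altered = True
        print("Warning: %s resources were not cleaned up" % len(leaked_items),
              file=sys.stderr)
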
@@ -593,6 +593,8 @@ class ProgramsTestCase(BaseTestCase):
     def test_pcbuild_rt(self):
         # PCbuild\rt.bat
         script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+        if not os.path.isfile(script):
+            self.skipTest(f'File "{script}" does not exist')
         rt_args = ["-q"]    # Quick, don't run tests twice
         if platform.architecture()[0] == '64bit':
             rt_args.append('-x64')   # 64-bit build
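
Note: the added guard is the usual unittest pattern for skipping a test when a required file is missing. A standalone equivalent, for illustration only:

import os.path
import unittest

class ExampleTests(unittest.TestCase):
    def test_needs_script(self):
        script = r'PCbuild\rt.bat'   # path checked by the real test
        if not os.path.isfile(script):
            # Skip rather than fail when the checkout has no PCbuild script.
            self.skipTest(f'File "{script}" does not exist')
        # ... invoke the script here ...
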