[2.7] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master (#2444)
* bpo-30523: regrtest --list-cases --match (#2401)

  * regrtest --list-cases now supports the --match and --match-file options.
    Example: ./python -m test --list-cases -m FileTests test_os
  * --list-cases now also sets support.verbose to False to prevent messages
    from being written to stdout when loading test modules.
  * Add the support._match_test() private function.

  (cherry picked from commit ace56d5836)
  (cherry picked from commit 36946c06a3)
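As a minimal illustration (not part of the diff; the test id below is only an assumed example), the --match patterns are fnmatch-style and are compared both against the full dotted test id and against each of its components, which is why a bare class name such as FileTests is enough to select its cases:

    import fnmatch

    def matches(test_id, patterns):
        # Sketch of the matching rule behind --match / --match-file:
        # a pattern may hit the full test id or any dotted component of it.
        if not patterns:
            return True
        for pattern in patterns:
            if fnmatch.fnmatchcase(test_id, pattern):
                return True
            for name in test_id.split("."):
                if fnmatch.fnmatchcase(name, pattern):
                    return True
        return False

    print(matches("test.test_os.FileTests.test_access", ["FileTests"]))    # True
    print(matches("test.test_os.FileTests.test_access", ["test_stat_*"]))  # False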
* bpo-30764: regrtest: add --fail-env-changed option (#2402)

  * bpo-30764: regrtest: change the exit code on failure:
    * exit code 2 if tests failed ("bad")
    * exit code 3 if interrupted
  * bpo-30764: regrtest: add the --fail-env-changed option. If the option is
    set, mark a test as failed if it alters the environment, for example if
    it creates a file without removing it.

  (cherry picked from commit 63f54c6893)
  (cherry picked from commit 1f33857a36)
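For context, a hypothetical wrapper (the script and the chosen test are assumptions, not part of the change) showing how a caller could interpret the exit codes that the diff below actually sets: 0 for success, 2 when tests fail, 130 on SIGINT, and 3 when --fail-env-changed is given and a test altered the environment:

    import subprocess
    import sys

    # Hypothetical driver: run one test file through regrtest and translate
    # the exit code introduced by this change into a short label.
    LABELS = {
        0: "SUCCESS",
        2: "FAILURE (some tests failed)",
        3: "ENV CHANGED (with --fail-env-changed)",
        130: "INTERRUPTED (SIGINT)",
    }

    rc = subprocess.call([sys.executable, "-m", "test",
                          "--fail-env-changed", "test_os"])
    print(LABELS.get(rc, "unexpected exit code %s" % rc))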
* bpo-30776: reduce regrtest -R false positives (#2422)

  * Change the regrtest --huntrleaks checker that decides whether a test file
    leaks or not: require that each run leaks at least 1 reference.
  * Warmup runs are now ignored completely: they are skipped by the checker
    and no longer used to compute the sum.
  * Add a unit test for a reference leak.

  Examples of reference-count differences that were previously reported as a
  failure (leak) but are now considered a success (no leak):

    [3, 0, 0]
    [0, 1, 0]
    [8, -8, 1]

  (cherry picked from commit 48b5c422ff)
  (cherry picked from commit e0f8b43a46)
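A short sketch of the new decision rule (it mirrors the all(delta >= 1 ...) check added to dash_R in the diff below), applied to the delta lists quoted above plus the two "expected leak" examples from the code comment:

    def is_leak(deltas):
        # New rule: report a leak only if every measured run (warmup runs
        # excluded) leaked at least one reference.
        return all(delta >= 1 for delta in deltas)

    for deltas in ([3, 0, 0], [0, 1, 0], [8, -8, 1],    # former false positives
                   [5, 5, 6], [10, 1, 1]):              # real leaks, still caught
        print("%s -> %s" % (deltas, is_leak(deltas)))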
Parent: 2d775589d9
Commit: fea98bfcff
@@ -67,6 +67,8 @@ Special runs
                    don't execute them
 --list-cases    -- only write the name of test cases that will be run,
                    don't execute them
+--fail-env-changed -- if a test file alters the environment, mark the test
+                   as failed

 Additional Option Details:

@@ -327,7 +329,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
              'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
              'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo',
              'failfast', 'match=', 'testdir=', 'list-tests', 'list-cases',
-             'coverage', 'matchfile='])
+             'coverage', 'matchfile=', 'fail-env-changed'])
     except getopt.error, msg:
         usage(2, msg)

@@ -339,6 +341,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
     slaveargs = None
     list_tests = False
     list_cases_opt = False
+    fail_env_changed = False
     for o, a in opts:
         if o in ('-h', '--help'):
             usage(0)
@@ -439,6 +442,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
             list_tests = True
         elif o == '--list-cases':
             list_cases_opt = True
+        elif o == '--fail-env-changed':
+            fail_env_changed = True
         else:
             print >>sys.stderr, ("No handler for option {}. Please "
                 "report this as a bug at http://bugs.python.org.").format(o)
@@ -558,7 +563,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
         sys.exit(0)

     if list_cases_opt:
-        list_cases(testdir, selected)
+        list_cases(testdir, selected, match_tests)
         sys.exit(0)

     if trace:
@@ -908,11 +913,19 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
         result = "FAILURE"
     elif interrupted:
         result = "INTERRUPTED"
+    elif environment_changed and fail_env_changed:
+        result = "ENV CHANGED"
     else:
         result = "SUCCESS"
     print("Tests result: %s" % result)

-    sys.exit(len(bad) > 0 or interrupted)
+    if bad:
+        sys.exit(2)
+    if interrupted:
+        sys.exit(130)
+    if fail_env_changed and environment_changed:
+        sys.exit(3)
+    sys.exit(0)


 STDTESTS = [
@@ -1310,7 +1323,18 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         if i >= nwarmup:
             deltas.append(rc_after - rc_before)
     print >> sys.stderr
-    if any(deltas):
+
+    # bpo-30776: Try to ignore false positives:
+    #
+    #   [3, 0, 0]
+    #   [0, 1, 0]
+    #   [8, -8, 1]
+    #
+    # Expected leaks:
+    #
+    #   [5, 5, 6]
+    #   [10, 1, 1]
+    if all(delta >= 1 for delta in deltas):
         msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
         print >> sys.stderr, msg
         with open(fname, "a") as refrep:
@@ -1501,9 +1525,13 @@ def _list_cases(suite):
         if isinstance(test, unittest.TestSuite):
             _list_cases(test)
         elif isinstance(test, unittest.TestCase):
-            print(test.id())
+            if test_support._match_test(test):
+                print(test.id())

-def list_cases(testdir, selected):
+def list_cases(testdir, selected, match_tests):
+    test_support.verbose = False
+    test_support.match_tests = match_tests
+
     skipped = []
     for test in selected:
         abstest = get_abs_module(testdir, test)

@@ -1542,6 +1542,23 @@ def _run_suite(suite):
             raise TestFailed(err)


+def _match_test(test):
+    global match_tests
+
+    if match_tests is None:
+        return True
+    test_id = test.id()
+
+    for match_test in match_tests:
+        if fnmatch.fnmatchcase(test_id, match_test):
+            return True
+
+        for name in test_id.split("."):
+            if fnmatch.fnmatchcase(name, match_test):
+                return True
+    return False
+
+
 def run_unittest(*classes):
     """Run tests from unittest.TestCase-derived classes."""
     valid_types = (unittest.TestSuite, unittest.TestCase)
@@ -1556,20 +1573,7 @@ def run_unittest(*classes):
             suite.addTest(cls)
         else:
             suite.addTest(unittest.makeSuite(cls))
-    def case_pred(test):
-        if match_tests is None:
-            return True
-        test_id = test.id()
-
-        for match_test in match_tests:
-            if fnmatch.fnmatchcase(test_id, match_test):
-                return True
-
-            for name in test_id.split("."):
-                if fnmatch.fnmatchcase(name, match_test):
-                    return True
-        return False
-    _filter_suite(suite, case_pred)
+    _filter_suite(suite, _match_test)
     _run_suite(suite)

 #=======================================================================

@@ -91,19 +91,19 @@ class BaseTestCase(unittest.TestCase):
         return list(match.group(1) for match in parser)

     def check_executed_tests(self, output, tests, skipped=(), failed=(),
-                             omitted=(), randomize=False, interrupted=False):
+                             env_changed=(), omitted=(),
+                             randomize=False, interrupted=False,
+                             fail_env_changed=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
         if isinstance(failed, str):
             failed = [failed]
+        if isinstance(env_changed, str):
+            env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        ntest = len(tests)
-        nskipped = len(skipped)
-        nfailed = len(failed)
-        nomitted = len(omitted)

         executed = self.parse_executed_tests(output)
         if randomize:
@@ -129,11 +129,17 @@ class BaseTestCase(unittest.TestCase):
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)

+        if env_changed:
+            regex = list_regex('%s test%s altered the execution environment',
+                               env_changed)
+            self.check_line(output, regex)
+
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)

-        good = ntest - nskipped - nfailed - nomitted
+        good = (len(tests) - len(skipped) - len(failed)
+                - len(omitted) - len(env_changed))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -143,10 +149,12 @@ class BaseTestCase(unittest.TestCase):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')

-        if nfailed:
+        if failed:
             result = 'FAILURE'
         elif interrupted:
             result = 'INTERRUPTED'
+        elif fail_env_changed and env_changed:
+            result = 'ENV CHANGED'
         else:
             result = 'SUCCESS'
         self.check_line(output, 'Tests result: %s' % result)
@@ -325,7 +333,7 @@ class ArgsTestCase(BaseTestCase):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]

-        output = self.run_tests(*tests, exitcode=1)
+        output = self.run_tests(*tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=test_failing)

     def test_resources(self):
@@ -394,7 +402,7 @@ class ArgsTestCase(BaseTestCase):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=1)
+        output = self.run_tests(test, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)

@@ -423,7 +431,7 @@ class ArgsTestCase(BaseTestCase):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=1)
+        output = self.run_tests(*args, exitcode=130)
         self.check_executed_tests(output, test,
                                   omitted=test, interrupted=True)

@@ -461,9 +469,47 @@ class ArgsTestCase(BaseTestCase):
                 support.run_unittest(ForeverTester)
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=1)
+        output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)

+    def check_leak(self, code, what):
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks', '3:3:', test,
+                                exitcode=2,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+            self.assertIn(line2, reflog)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks(self):
+        # test --huntrleaks
+        code = textwrap.dedent("""
+            import unittest
+            from test import support
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    GLOBAL_LIST.append(object())
+
+            def test_main():
+                support.run_unittest(RefLeakTest)
+        """)
+        self.check_leak(code, 'references')
+
     def test_list_tests(self):
         # test --list-tests
         tests = [self.create_test() for i in range(5)]
@@ -471,6 +517,32 @@ class ArgsTestCase(BaseTestCase):
         self.assertEqual(output.rstrip().splitlines(),
                          tests)

+    def test_list_cases(self):
+        # test --list-cases
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_method1(self):
+                    pass
+                def test_method2(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+
+        # Test --list-cases
+        all_methods = ['%s.Tests.test_method1' % testname,
+                       '%s.Tests.test_method2' % testname]
+        output = self.run_tests('--list-cases', testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
+        # Test --list-cases with --match
+        all_methods = ['%s.Tests.test_method1' % testname]
+        output = self.run_tests('--list-cases',
+                                '-m', 'test_method1',
+                                testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
     def test_crashed(self):
         # Any code which causes a crash
         code = 'import test.support; test.support._crash_python()'
@@ -478,7 +550,7 @@ class ArgsTestCase(BaseTestCase):
         ok_test = self.create_test(name="ok")

         tests = [crash_test, ok_test]
-        output = self.run_tests("-j2", *tests, exitcode=1)
+        output = self.run_tests("-j2", *tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)

@@ -532,26 +604,28 @@ class ArgsTestCase(BaseTestCase):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)

-    def test_list_cases(self):
-        # test --list-cases
+    def test_env_changed(self):
         code = textwrap.dedent("""
             import unittest
             from test import support

             class Tests(unittest.TestCase):
-                def test_method1(self):
-                    pass
-                def test_method2(self):
-                    pass
+                def test_env_changed(self):
+                    open("env_changed", "w").close()

             def test_main():
                 support.run_unittest(Tests)
         """)
         testname = self.create_test(code=code)
-        all_methods = ['%s.Tests.test_method1' % testname,
-                       '%s.Tests.test_method2' % testname]
-        output = self.run_tests('--list-cases', testname)
-        self.assertEqual(output.splitlines(), all_methods)
+
+        # don't fail by default
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname], env_changed=testname)
+
+        # fail with --fail-env-changed
+        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        self.check_executed_tests(output, [testname], env_changed=testname,
+                                  fail_env_changed=True)


 def test_main():