bpo-34279, regrtest: Issue a warning if no tests have been executed (GH-10150)
Commit: 9724348b43 · Parent: b2774c8e91
Lib/test/libregrtest/main.py (paths on this page were lost in extraction and are inferred from context)

@@ -14,7 +14,7 @@ from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
     findtests, runtest, get_abs_module,
     STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
-    INTERRUPTED, CHILD_ERROR,
+    INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
     PROGRESS_MIN_TIME, format_test_result)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.utils import removepy, count, format_duration, printlist
@@ -79,6 +79,7 @@ class Regrtest:
         self.resource_denieds = []
         self.environment_changed = []
         self.rerun = []
+        self.run_no_tests = []
         self.first_result = None
         self.interrupted = False
@@ -118,6 +119,8 @@ class Regrtest:
         elif ok == RESOURCE_DENIED:
             self.skipped.append(test)
             self.resource_denieds.append(test)
+        elif ok == TEST_DID_NOT_RUN:
+            self.run_no_tests.append(test)
         elif ok != INTERRUPTED:
             raise ValueError("invalid test result: %r" % ok)
@@ -368,6 +371,11 @@ class Regrtest:
             print("%s:" % count(len(self.rerun), "re-run test"))
             printlist(self.rerun)

+        if self.run_no_tests:
+            print()
+            print(count(len(self.run_no_tests), "test"), "run no tests:")
+            printlist(self.run_no_tests)
+
     def run_tests_sequential(self):
         if self.ns.trace:
             import trace
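For orientation, the new block above adds a section like the following to the end-of-run summary when two test files ran zero tests (shape approximate; count() and printlist() are the regrtest helpers imported in the first hunk, and the test names are invented):

    2 tests run no tests:
        test_x test_y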
@@ -458,6 +466,9 @@ class Regrtest:
             result.append("FAILURE")
         elif self.ns.fail_env_changed and self.environment_changed:
             result.append("ENV CHANGED")
+        elif not any((self.good, self.bad, self.skipped, self.interrupted,
+                      self.environment_changed)):
+            result.append("NO TEST RUN")

         if self.interrupted:
             result.append("INTERRUPTED")
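To make the precedence of the final states concrete, here is a hedged restatement of the logic above as a plain function (a sketch, not the real class: the argument names mirror Regrtest attributes, and the SUCCESS fallback comes from surrounding code not shown in this hunk):

    def tests_result(good, bad, skipped, interrupted, environment_changed,
                     fail_env_changed=False):
        # Mirrors Regrtest's final result computation (illustration only).
        result = []
        if bad:
            result.append("FAILURE")
        elif fail_env_changed and environment_changed:
            result.append("ENV CHANGED")
        elif not any((good, bad, skipped, interrupted, environment_changed)):
            result.append("NO TEST RUN")

        if interrupted:
            result.append("INTERRUPTED")
        if not result:
            result.append("SUCCESS")
        return ', '.join(result)

    print(tests_result(good=[], bad=[], skipped=[], interrupted=False,
                       environment_changed=[]))          # NO TEST RUN
    print(tests_result(good=['test_os'], bad=[], skipped=[], interrupted=False,
                       environment_changed=[]))          # SUCCESS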
Lib/test/libregrtest/runtest.py (inferred)

@@ -19,6 +19,7 @@ SKIPPED = -2
 RESOURCE_DENIED = -3
 INTERRUPTED = -4
 CHILD_ERROR = -5   # error in a child process
+TEST_DID_NOT_RUN = -6   # the test ran no tests

 _FORMAT_TEST_RESULT = {
     PASSED: '%s passed',
@@ -28,6 +29,7 @@ _FORMAT_TEST_RESULT = {
     RESOURCE_DENIED: '%s skipped (resource denied)',
     INTERRUPTED: '%s interrupted',
     CHILD_ERROR: '%s crashed',
+    TEST_DID_NOT_RUN: '%s run no tests',
 }

 # Minimum duration of a test to display its duration or to mention that
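main.py formats per-test status lines through this table via format_test_result, imported in the first hunk. A minimal sketch of that lookup (the function's real signature lives in runtest.py, so treat the signature, the fallback, and the PASSED value as assumptions):

    # Values assumed for illustration; the hunk above only shows
    # SKIPPED = -2 through TEST_DID_NOT_RUN = -6.
    PASSED = 1
    TEST_DID_NOT_RUN = -6

    _FORMAT_TEST_RESULT = {
        PASSED: '%s passed',
        TEST_DID_NOT_RUN: '%s run no tests',
    }

    def format_test_result(test_name, result):
        # Unknown result codes fall back to the bare test name.
        fmt = _FORMAT_TEST_RESULT.get(result, '%s')
        return fmt % test_name

    print(format_test_result('test_os', TEST_DID_NOT_RUN))  # test_os run no tests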
@@ -94,6 +96,7 @@ def runtest(ns, test):
         ENV_CHANGED      test failed because it changed the execution environment
         FAILED           test failed
         PASSED           test passed
+        TEST_DID_NOT_RUN test ran no tests

     If ns.xmlpath is not None, xml_data is a list containing each
     generated testsuite element.
@@ -197,6 +200,8 @@ def runtest_inner(ns, test, display_failure=True):
         else:
             print("test", test, "failed", file=sys.stderr, flush=True)
         return FAILED, test_time
+    except support.TestDidNotRun:
+        return TEST_DID_NOT_RUN, test_time
     except:
         msg = traceback.format_exc()
         if not ns.pgo:
Lib/test/support/__init__.py (inferred)

@@ -72,7 +72,7 @@ __all__ = [
     # globals
     "PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
     # exceptions
-    "Error", "TestFailed", "ResourceDenied",
+    "Error", "TestFailed", "TestDidNotRun", "ResourceDenied",
     # imports
     "import_module", "import_fresh_module", "CleanImport",
     # modules
@@ -120,6 +120,9 @@ class Error(Exception):
 class TestFailed(Error):
     """Test failed."""

+class TestDidNotRun(Error):
+    """Test did not run any subtests."""
+
 class ResourceDenied(unittest.SkipTest):
     """Test skipped because it requested a disallowed resource.
@@ -1930,6 +1933,8 @@ def _run_suite(suite):
     if junit_xml_list is not None:
         junit_xml_list.append(result.get_xml_element())

+    if not result.testsRun:
+        raise TestDidNotRun
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:
             err = result.errors[0][1]
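The condition being guarded against is easy to reproduce outside regrtest: a suite whose filter matches nothing "passes" with zero tests run. A self-contained sketch (class and prefix names invented for illustration):

    import unittest

    class Tests(unittest.TestCase):
        def test_bug(self):
            pass

    # Mimic "-m nosuchtest": select test methods by a prefix no method has.
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'nosuchtest'
    suite = loader.loadTestsFromTestCase(Tests)

    result = unittest.TextTestRunner(verbosity=0).run(suite)
    print(result.testsRun)         # 0
    print(result.wasSuccessful())  # True -- hence the explicit testsRun check above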
Lib/test/test_regrtest.py (inferred)

@@ -351,11 +351,20 @@ class BaseTestCase(unittest.TestCase):
         self.tmptestdir = tempfile.mkdtemp()
         self.addCleanup(support.rmtree, self.tmptestdir)

-    def create_test(self, name=None, code=''):
+    def create_test(self, name=None, code=None):
         if not name:
             name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
             BaseTestCase.TEST_UNIQUE_ID += 1

+        if code is None:
+            code = textwrap.dedent("""
+                import unittest
+
+                class Tests(unittest.TestCase):
+                    def test_empty_test(self):
+                        pass
+            """)
+
         # test_regrtest cannot be run twice in parallel because
         # of setUp() and create_test()
         name = self.TESTNAME_PREFIX + name
@@ -390,7 +399,7 @@ class BaseTestCase(unittest.TestCase):
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
                              env_changed=(), omitted=(),
-                             rerun=(),
+                             rerun=(), no_test_ran=(),
                              randomize=False, interrupted=False,
                              fail_env_changed=False):
         if isinstance(tests, str):
@@ -405,6 +414,8 @@ class BaseTestCase(unittest.TestCase):
             omitted = [omitted]
         if isinstance(rerun, str):
             rerun = [rerun]
+        if isinstance(no_test_ran, str):
+            no_test_ran = [no_test_ran]

         executed = self.parse_executed_tests(output)
         if randomize:
@@ -447,8 +458,12 @@ class BaseTestCase(unittest.TestCase):
             regex = "Re-running test %r in verbose mode" % name
             self.check_line(output, regex)

+        if no_test_ran:
+            regex = list_regex('%s test%s run no tests', no_test_ran)
+            self.check_line(output, regex)
+
         good = (len(tests) - len(skipped) - len(failed)
-                - len(omitted) - len(env_changed))
+                - len(omitted) - len(env_changed) - len(no_test_ran))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -465,12 +480,16 @@ class BaseTestCase(unittest.TestCase):
             result.append('ENV CHANGED')
         if interrupted:
             result.append('INTERRUPTED')
-        if not result:
+        if not any((good, result, failed, interrupted, skipped,
+                    env_changed, fail_env_changed)):
+            result.append("NO TEST RUN")
+        elif not result:
             result.append('SUCCESS')
         result = ', '.join(result)
         if rerun:
             self.check_line(output, 'Tests result: %s' % result)
             result = 'FAILURE then %s' % result

         self.check_line(output, 'Tests result: %s' % result)

     def parse_random_seed(self, output):
@@ -649,7 +668,14 @@ class ArgsTestCase(BaseTestCase):
         # test -u command line option
         tests = {}
         for resource in ('audio', 'network'):
-            code = 'from test import support\nsupport.requires(%r)' % resource
+            code = textwrap.dedent("""
+                from test import support; support.requires(%r)
+                import unittest
+                class PassingTest(unittest.TestCase):
+                    def test_pass(self):
+                        pass
+            """ % resource)
+
             tests[resource] = self.create_test(resource, code)

         test_names = sorted(tests.values())
@@ -978,6 +1004,56 @@ class ArgsTestCase(BaseTestCase):
         output = self.run_tests("-w", testname, exitcode=2)
         self.check_executed_tests(output, [testname],
                                   failed=testname, rerun=testname)

+    def test_no_tests_ran(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
+        self.check_executed_tests(output, [testname], no_test_ran=testname)
+
+    def test_no_tests_ran_multiple_tests_nonexistent(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+        testname2 = self.create_test(code=code)
+
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname, testname2])
+
+    def test_no_test_ran_some_test_exist_some_not(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+        other_code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_other_bug(self):
+                    pass
+        """)
+        testname2 = self.create_test(code=other_code)
+
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
+                                "-m", "test_other_bug", exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname])
+
+
 class TestUtils(unittest.TestCase):
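A quick manual way to exercise the same code paths the tests above cover: regrtest's -m option filters test methods by pattern, so filtering a real test file by a pattern that matches nothing should report the new state and, as the exitcode=0 assertions confirm, still exit 0 (test name and elided output are illustrative):

    $ ./python -m test test_os -m nosuchtest
    ...
    Tests result: NO TEST RUN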
Misc/NEWS.d (new file; exact filename not recoverable from this page)

@@ -0,0 +1,3 @@
+regrtest issues a warning when no tests have been executed in a particular
+test file. Also, a new final result state is issued if no test has been
+executed across all test files. Patch by Pablo Galindo.