import os
import random
import re
import shlex
import sys
import sysconfig
import time

from test import support
from test.support import os_helper

from .cmdline import _parse_args, Namespace
from .findtests import findtests, split_test_packages, list_cases
from .logger import Logger
from .pgo import setup_pgo_tests
from .result import State
from .results import TestResults, EXITCODE_INTERRUPTED
from .runtests import RunTests, HuntRefleak
from .setup import setup_process, setup_test_dir
from .single import run_single_test, PROGRESS_MIN_TIME
from .utils import (
    StrPath, StrJSON, TestName, TestList, TestTuple, FilterTuple,
    strip_py_suffix, count, format_duration,
    printlist, get_temp_dir, get_work_dir, exit_timeout,
    display_header, cleanup_temp_dir, print_warning,
    is_cross_compiled, get_host_runner,
    MS_WINDOWS, EXIT_TIMEOUT)


class Regrtest:
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, exclude,
    single, randomize, use_resources, trace, coverdir,
    print_slow, and random_seed) allow programmers calling main()
    directly to set the values that would normally be set by flags
    on the command line.
    """
    def __init__(self, ns: Namespace, _add_python_opts: bool = False):
        # Log verbosity
        self.verbose: int = int(ns.verbose)
        self.quiet: bool = ns.quiet
        self.pgo: bool = ns.pgo
        self.pgo_extended: bool = ns.pgo_extended

        # Test results
        self.results: TestResults = TestResults()
        self.first_state: str | None = None

        # Logger
        self.logger = Logger(self.results, self.quiet, self.pgo)

        # Actions
        self.want_header: bool = ns.header
        self.want_list_tests: bool = ns.list_tests
        self.want_list_cases: bool = ns.list_cases
        self.want_wait: bool = ns.wait
        self.want_cleanup: bool = ns.cleanup
        self.want_rerun: bool = ns.rerun
        self.want_run_leaks: bool = ns.runleaks

        self.ci_mode: bool = (ns.fast_ci or ns.slow_ci)
        self.want_add_python_opts: bool = (_add_python_opts
                                           and ns._add_python_opts)

        # Select tests
        if ns.match_tests:
            self.match_tests: FilterTuple | None = tuple(ns.match_tests)
        else:
            self.match_tests = None
        if ns.ignore_tests:
            self.ignore_tests: FilterTuple | None = tuple(ns.ignore_tests)
        else:
            self.ignore_tests = None
        self.exclude: bool = ns.exclude
        self.fromfile: StrPath | None = ns.fromfile
        self.starting_test: TestName | None = ns.start
        self.cmdline_args: TestList = ns.args

        # Workers
        if ns.use_mp is None:
            num_workers = 0  # run sequentially
        elif ns.use_mp <= 0:
            num_workers = -1  # use the number of CPUs
        else:
            num_workers = ns.use_mp
        self.num_workers: int = num_workers
        self.worker_json: StrJSON | None = ns.worker_json

        # Options to run tests
        self.fail_fast: bool = ns.failfast
        self.fail_env_changed: bool = ns.fail_env_changed
        self.fail_rerun: bool = ns.fail_rerun
        self.forever: bool = ns.forever
        self.output_on_failure: bool = ns.verbose3
        self.timeout: float | None = ns.timeout
        if ns.huntrleaks:
            warmups, runs, filename = ns.huntrleaks
            filename = os.path.abspath(filename)
            self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename)
        else:
            self.hunt_refleak = None
        self.test_dir: StrPath | None = ns.testdir
        self.junit_filename: StrPath | None = ns.xmlpath
        self.memory_limit: str | None = ns.memlimit
        self.gc_threshold: int | None = ns.threshold
        self.use_resources: tuple[str, ...] = tuple(ns.use_resources)
        if ns.python:
            self.python_cmd: tuple[str, ...] | None = tuple(ns.python)
        else:
            self.python_cmd = None
        self.coverage: bool = ns.trace
        self.coverage_dir: StrPath | None = ns.coverdir
        self.tmp_dir: StrPath | None = ns.tempdir

        # Randomize
        self.randomize: bool = ns.randomize
        self.random_seed: int | None = ns.random_seed
        if 'SOURCE_DATE_EPOCH' in os.environ:
            self.randomize = False
            self.random_seed = None

        # tests
        self.first_runtests: RunTests | None = None

        # used by --slowest
        self.print_slowest: bool = ns.print_slow

        # used to display the progress bar "[ 3/100]"
        self.start_time = time.perf_counter()

        # used by --single
        self.single_test_run: bool = ns.single
        self.next_single_test: TestName | None = None
        self.next_single_filename: StrPath | None = None

    def log(self, line=''):
        self.logger.log(line)

    def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
        if self.single_test_run:
            self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
            try:
                with open(self.next_single_filename, 'r') as fp:
                    next_test = fp.read().strip()
                    tests = [next_test]
            except OSError:
                pass

        if self.fromfile:
            tests = []
            # regex to match 'test_builtin' in line:
            # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
            regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
            with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp:
                for line in fp:
                    line = line.split('#', 1)[0]
                    line = line.strip()
                    match = regex.search(line)
                    if match is not None:
                        tests.append(match.group())

        strip_py_suffix(tests)

        if self.pgo:
            # add default PGO tests if no tests are specified
            setup_pgo_tests(self.cmdline_args, self.pgo_extended)

        exclude_tests = set()
        if self.exclude:
            for arg in self.cmdline_args:
                exclude_tests.add(arg)
            self.cmdline_args = []

        alltests = findtests(testdir=self.test_dir,
                             exclude=exclude_tests)

        if not self.fromfile:
            selected = tests or self.cmdline_args
            if selected:
                selected = split_test_packages(selected)
            else:
                selected = alltests
        else:
            selected = tests

        if self.single_test_run:
            selected = selected[:1]
            try:
                pos = alltests.index(selected[0])
                self.next_single_test = alltests[pos + 1]
            except IndexError:
                pass

        # Remove all the selected tests that precede start if it's set.
        if self.starting_test:
            try:
                del selected[:selected.index(self.starting_test)]
            except ValueError:
                print(f"Cannot find starting test: {self.starting_test}")
                sys.exit(1)

        if self.randomize:
            if self.random_seed is None:
                self.random_seed = random.randrange(100_000_000)
            random.seed(self.random_seed)
            random.shuffle(selected)

        return (tuple(selected), tests)

    @staticmethod
    def list_tests(tests: TestTuple):
        for name in tests:
            print(name)

    def _rerun_failed_tests(self, runtests: RunTests):
        # Configure the runner to re-run tests
        if self.num_workers == 0:
            # Always run tests in fresh processes to have more deterministic
            # initial state. Don't re-run tests in parallel but use a single
            # worker process, to limit side effects (on the system load and
            # timings) between tests.
            self.num_workers = 1

        tests, match_tests_dict = self.results.prepare_rerun()

        # Re-run failed tests
        self.log(f"Re-running {len(tests)} failed tests in verbose mode in subprocesses")
        runtests = runtests.copy(
            tests=tests,
            rerun=True,
            verbose=True,
            forever=False,
            fail_fast=False,
            match_tests_dict=match_tests_dict,
            output_on_failure=False)
        self.logger.set_tests(runtests)
        self._run_tests_mp(runtests, self.num_workers)
        return runtests

    def rerun_failed_tests(self, runtests: RunTests):
        if self.python_cmd:
            # Temp patch for https://github.com/python/cpython/issues/94052
            self.log(
                "Re-running failed tests is not supported with --python "
                "host runner option."
            )
            return

        self.first_state = self.get_state()

        print()
        rerun_runtests = self._rerun_failed_tests(runtests)

        if self.results.bad:
            print(count(len(self.results.bad), 'test'), "failed again:")
            printlist(self.results.bad)

        self.display_result(rerun_runtests)

    def display_result(self, runtests):
        # If running the test suite for PGO then no one cares about results.
        if runtests.pgo:
            return

        state = self.get_state()
        print()
        print(f"== Tests result: {state} ==")

        self.results.display_result(runtests.tests,
                                    self.quiet, self.print_slowest)

    def run_test(self, test_name: TestName, runtests: RunTests, tracer):
        if tracer is not None:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            cmd = ('result = run_single_test(test_name, runtests)')
            namespace = dict(locals())
            tracer.runctx(cmd, globals=globals(), locals=namespace)
            result = namespace['result']
        else:
            result = run_single_test(test_name, runtests)

        self.results.accumulate_result(result, runtests)

        return result

    def run_tests_sequentially(self, runtests):
        if self.coverage:
            import trace
            tracer = trace.Trace(trace=False, count=True)
        else:
            tracer = None

        # Snapshot the names of the modules loaded before running the tests
        save_modules = set(sys.modules)

        jobs = runtests.get_jobs()
        if jobs is not None:
            tests = count(jobs, 'test')
        else:
            tests = 'tests'
        msg = f"Run {tests} sequentially"
        if runtests.timeout:
            msg += " (timeout: %s)" % format_duration(runtests.timeout)
        self.log(msg)

        previous_test = None
        tests_iter = runtests.iter_tests()
        for test_index, test_name in enumerate(tests_iter, 1):
            start_time = time.perf_counter()

            text = test_name
            if previous_test:
                text = '%s -- %s' % (text, previous_test)
            self.logger.display_progress(test_index, text)

            result = self.run_test(test_name, runtests, tracer)

            # Unload the newly imported modules (best effort finalization)
            for module in list(sys.modules):
                if module not in save_modules and module.startswith("test."):
                    support.unload(module)

            if result.must_stop(self.fail_fast, self.fail_env_changed):
                break

            previous_test = str(result)
            test_time = time.perf_counter() - start_time
            if test_time >= PROGRESS_MIN_TIME:
                previous_test = "%s in %s" % (previous_test, format_duration(test_time))
            elif result.state == State.PASSED:
                # be quiet: say nothing if the test passed quickly
                previous_test = None

        if previous_test:
            print(previous_test)

        return tracer

    def get_state(self):
        state = self.results.get_state(self.fail_env_changed)
        if self.first_state:
            state = f'{self.first_state} then {state}'
        return state

    def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None:
        from .run_workers import RunWorkers
        RunWorkers(num_workers, runtests, self.logger, self.results).run()

    def finalize_tests(self, tracer):
        if self.next_single_filename:
            if self.next_single_test:
                with open(self.next_single_filename, 'w') as fp:
                    fp.write(self.next_single_test + '\n')
            else:
                os.unlink(self.next_single_filename)

        if tracer is not None:
            results = tracer.results()
            results.write_results(show_missing=True, summary=True,
                                  coverdir=self.coverage_dir)

        if self.want_run_leaks:
            os.system("leaks %d" % os.getpid())

        if self.junit_filename:
            self.results.write_junit(self.junit_filename)

    def display_summary(self):
        duration = time.perf_counter() - self.logger.start_time
        filtered = bool(self.match_tests) or bool(self.ignore_tests)

        # Total duration
        print()
        print("Total duration: %s" % format_duration(duration))

        self.results.display_summary(self.first_runtests, filtered)

        # Result
        state = self.get_state()
        print(f"Result: {state}")

    def create_run_tests(self, tests: TestTuple):
        return RunTests(
            tests,
            fail_fast=self.fail_fast,
            fail_env_changed=self.fail_env_changed,
            match_tests=self.match_tests,
            ignore_tests=self.ignore_tests,
            match_tests_dict=None,
            rerun=False,
            forever=self.forever,
            pgo=self.pgo,
            pgo_extended=self.pgo_extended,
            output_on_failure=self.output_on_failure,
            timeout=self.timeout,
            verbose=self.verbose,
            quiet=self.quiet,
            hunt_refleak=self.hunt_refleak,
            test_dir=self.test_dir,
            use_junit=(self.junit_filename is not None),
            memory_limit=self.memory_limit,
            gc_threshold=self.gc_threshold,
            use_resources=self.use_resources,
            python_cmd=self.python_cmd,
            randomize=self.randomize,
            random_seed=self.random_seed,
            json_file=None,
        )

    def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
        if self.hunt_refleak and self.hunt_refleak.warmups < 3:
            msg = ("WARNING: Running tests with --huntrleaks/-R and "
                   "less than 3 warmup repetitions can give false positives!")
            print(msg, file=sys.stdout, flush=True)

        if self.num_workers < 0:
            # Use all CPUs + 2 extra worker processes for tests
            # that like to sleep
            self.num_workers = (os.process_cpu_count() or 1) + 2

        # For a partial run, we do not need to clutter the output.
        if (self.want_header
            or not(self.pgo or self.quiet or self.single_test_run
                   or tests or self.cmdline_args)):
            display_header(self.use_resources, self.python_cmd)

        if self.randomize:
            print("Using random seed", self.random_seed)

        runtests = self.create_run_tests(selected)
        self.first_runtests = runtests
        self.logger.set_tests(runtests)

        setup_process()

        if self.hunt_refleak and not self.num_workers:
            # gh-109739: WindowsLoadTracker thread interferes with refleak check
            use_load_tracker = False
        else:
            # WindowsLoadTracker is only needed on Windows
            use_load_tracker = MS_WINDOWS

        if use_load_tracker:
            self.logger.start_load_tracker()
        try:
            if self.num_workers:
                self._run_tests_mp(runtests, self.num_workers)
                tracer = None
            else:
                tracer = self.run_tests_sequentially(runtests)

            self.display_result(runtests)

            if self.want_rerun and self.results.need_rerun():
                self.rerun_failed_tests(runtests)
        finally:
            if use_load_tracker:
                self.logger.stop_load_tracker()

        self.display_summary()
        self.finalize_tests(tracer)

        return self.results.get_exitcode(self.fail_env_changed,
                                         self.fail_rerun)

    def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
        os.makedirs(self.tmp_dir, exist_ok=True)
        work_dir = get_work_dir(self.tmp_dir)

        # Put a timeout on Python exit
        with exit_timeout():
            # Run the tests in a context manager that temporarily changes the
            # CWD to a temporary and writable directory. If it's not possible
            # to create or change the CWD, the original CWD will be used.
            # The original CWD is available from os_helper.SAVEDCWD.
            with os_helper.temp_cwd(work_dir, quiet=True):
                # When using multiprocessing, worker processes will use
                # work_dir as their parent temporary directory. So when the
                # main process exits, it also removes the subdirectories of
                # the worker processes.
                return self._run_tests(selected, tests)

    def _add_cross_compile_opts(self, regrtest_opts):
        # WASM/WASI buildbot builders pass multiple PYTHON environment
        # variables such as PYTHONPATH and _PYTHON_HOSTRUNNER.
        keep_environ = bool(self.python_cmd)
        environ = None

        # Are we using cross-compilation?
        cross_compile = is_cross_compiled()

        # Get HOSTRUNNER
        hostrunner = get_host_runner()

        if cross_compile:
            # emulate -E, but keep PYTHONPATH + cross compile env vars,
            # so test executable can load correct sysconfigdata file.
            keep = {
                '_PYTHON_PROJECT_BASE',
                '_PYTHON_HOST_PLATFORM',
                '_PYTHON_SYSCONFIGDATA_NAME',
                'PYTHONPATH'
            }
            old_environ = os.environ
            new_environ = {
                name: value for name, value in os.environ.items()
                if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
            }
            # Only set environ if at least one variable was removed
            if new_environ != old_environ:
                environ = new_environ
                keep_environ = True

        if cross_compile and hostrunner:
            if self.num_workers == 0:
                # For now use only two cores for cross-compiled builds;
                # hostrunner can be expensive.
                regrtest_opts.extend(['-j', '2'])

            # If HOSTRUNNER is set and -p/--python option is not given, then
            # use hostrunner to execute python binary for tests.
            if not self.python_cmd:
                buildpython = sysconfig.get_config_var("BUILDPYTHON")
                python_cmd = f"{hostrunner} {buildpython}"
                regrtest_opts.extend(["--python", python_cmd])
                keep_environ = True

        return (environ, keep_environ)

    def _add_ci_python_opts(self, python_opts, keep_environ):
        # --fast-ci and --slow-ci add options to Python:
        # "-u -W default -bb -E"

        # Unbuffered stdout and stderr
        if not sys.stdout.write_through:
            python_opts.append('-u')

        # Add warnings filter 'default'
        if 'default' not in sys.warnoptions:
            python_opts.extend(('-W', 'default'))

        # Error on bytes/str comparison
        if sys.flags.bytes_warning < 2:
            python_opts.append('-bb')

        if not keep_environ:
            # Ignore PYTHON* environment variables
            if not sys.flags.ignore_environment:
                python_opts.append('-E')

    def _execute_python(self, cmd, environ):
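        # Run the new command line: replace the current process with execv()
        # when available (non-Windows), otherwise run it as a subprocess and
        # exit with its return code. If that fails, log a warning and keep
        # running in the current process.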
        # Make sure that messages before execv() are logged
        sys.stdout.flush()
        sys.stderr.flush()

        cmd_text = shlex.join(cmd)
        try:
            print(f"+ {cmd_text}", flush=True)

            if hasattr(os, 'execv') and not MS_WINDOWS:
                os.execv(cmd[0], cmd)
                # On success, execv() does not return.
                # On error, it raises an OSError.
            else:
                import subprocess
                with subprocess.Popen(cmd, env=environ) as proc:
                    try:
                        proc.wait()
                    except KeyboardInterrupt:
                        # There is no need to call proc.terminate(): on CTRL+C,
                        # SIGTERM is also sent to the child process.
                        try:
                            proc.wait(timeout=EXIT_TIMEOUT)
                        except subprocess.TimeoutExpired:
                            proc.kill()
                            proc.wait()
                            sys.exit(EXITCODE_INTERRUPTED)

                sys.exit(proc.returncode)
        except Exception as exc:
            print_warning(f"Failed to change Python options: {exc!r}\n"
                          f"Command: {cmd_text}")
            # continue executing main()

    def _add_python_opts(self):
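        # Re-execute Python with the options required by --fast-ci/--slow-ci
        # and by cross-compilation, adding --dont-add-python-opts so that the
        # new process does not repeat this step. If nothing needs to change,
        # keep running in the current process.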
        python_opts = []
        regrtest_opts = []

        environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
        if self.ci_mode:
            self._add_ci_python_opts(python_opts, keep_environ)

        if (not python_opts) and (not regrtest_opts) and (environ is None):
            # Nothing changed: nothing to do
            return

        # Create new command line
        cmd = list(sys.orig_argv)
        if python_opts:
            cmd[1:1] = python_opts
        if regrtest_opts:
            cmd.extend(regrtest_opts)
        cmd.append("--dont-add-python-opts")

        self._execute_python(cmd, environ)

    def _init(self):
        # Set sys.stdout encoder error handler to backslashreplace,
        # similar to sys.stderr error handler, to avoid UnicodeEncodeError
        # when printing a traceback or any other non-encodable character.
        sys.stdout.reconfigure(errors="backslashreplace")

        if self.junit_filename and not os.path.isabs(self.junit_filename):
            self.junit_filename = os.path.abspath(self.junit_filename)

        strip_py_suffix(self.cmdline_args)

        self.tmp_dir = get_temp_dir(self.tmp_dir)

    def main(self, tests: TestList | None = None):
        if self.want_add_python_opts:
            self._add_python_opts()

        self._init()

        if self.want_cleanup:
            cleanup_temp_dir(self.tmp_dir)
            sys.exit(0)

        if self.want_wait:
            input("Press any key to continue...")

        setup_test_dir(self.test_dir)
        selected, tests = self.find_tests(tests)

        exitcode = 0
        if self.want_list_tests:
            self.list_tests(selected)
        elif self.want_list_cases:
            list_cases(selected,
                       match_tests=self.match_tests,
                       ignore_tests=self.ignore_tests,
                       test_dir=self.test_dir)
        else:
            exitcode = self.run_tests(selected, tests)

        sys.exit(exitcode)


def main(tests=None, _add_python_opts=False, **kwargs):
    """Run the Python test suite."""
    ns = _parse_args(sys.argv[1:], **kwargs)
    Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)