#11390: convert doctest CLI to argparse and add -o and -f options.
This provides a way to specify arbitrary doctest options when using the CLI interface to process test files, just as one can when calling testmod or testfile programmatically.
This commit is contained in:
parent
c00fffb659
commit
5707d508e1
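
For context, an illustrative sketch of the equivalence the commit message describes: the new command line options mirror the optionflags argument already accepted by testfile and testmod ('myfile.doc' is a placeholder file name, not part of the patch):

    import doctest

    # Roughly what "python -m doctest -o ELLIPSIS -o NORMALIZE_WHITESPACE myfile.doc"
    # now does from the command line (placeholder file name):
    doctest.testfile('myfile.doc', module_relative=False,
                     optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)

    # And "python -m doctest -f myfile.doc" corresponds to adding FAIL_FAST:
    doctest.testfile('myfile.doc', module_relative=False,
                     optionflags=doctest.FAIL_FAST)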
@@ -495,7 +495,10 @@ Option Flags
 A number of option flags control various aspects of doctest's behavior.
 Symbolic names for the flags are supplied as module constants, which can be
 or'ed together and passed to various functions. The names can also be used in
-:ref:`doctest directives <doctest-directives>`.
+:ref:`doctest directives <doctest-directives>`, and may be passed to the
+doctest command line interface via the ``-o`` option.
+
+.. versionadded:: 3.4 the ``-o`` command line option
 
 The first group of options define test semantics, controlling aspects of how
 doctest decides whether actual output matches an example's expected output:
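
As a sketch of what the added lines describe (the example is adapted from the ELLIPSIS illustration in the doctest documentation, not taken from this patch), the same flag can be requested several ways: or'ed into an optionflags argument, written as a directive on the example itself, or, with this change, passed as -o ELLIPSIS to python -m doctest:

    # Hypothetical demo module, not part of the patch.
    def show_range():
        """
        >>> print(list(range(20)))  # doctest: +ELLIPSIS
        [0, 1, ..., 18, 19]
        """

    if __name__ == '__main__':
        import doctest
        doctest.testmod()  # the directive enables ELLIPSIS for that one example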
|
@@ -640,6 +643,9 @@ The second group of options controls how test failures are reported:
    1. This flag may be useful during debugging, since examples after the first
    failure won't even produce debugging output.
 
+   The doctest command line accepts the option ``-f`` as a shorthand for ``-o
+   FAIL_FAST``.
+
    .. versionadded:: 3.4
 
 
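
A minimal standalone sketch of what the flag behind -f does (the failing examples below are invented for the demonstration, not taken from the patch): with FAIL_FAST set, the runner stops at the first failing example, so at most one failure is recorded.

    import doctest

    SAMPLE = """
    >>> 1 + 1
    3
    >>> 2 + 2
    5
    """

    # Both examples are wrong, but FAIL_FAST stops the run after the first one.
    test = doctest.DocTestParser().get_doctest(SAMPLE, {}, 'sample', 'sample.txt', 0)
    runner = doctest.DocTestRunner(optionflags=doctest.FAIL_FAST)
    print(runner.run(test))  # prints the failure report, then TestResults(failed=1, attempted=1)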
@@ -173,8 +173,15 @@ instructions.
 doctest
 -------
 
-Added ``FAIL_FAST`` flag to halt test running as soon as the first failure is
-detected. (Contributed by R. David Murray and Daniel Urban in :issue:`16522`.)
+Added :data:`~doctest.FAIL_FAST` flag to halt test running as soon as the first
+failure is detected. (Contributed by R. David Murray and Daniel Urban in
+:issue:`16522`.)
+
+Updated the doctest command line interface to use :mod:`argparse`, and added
+``-o`` and ``-f`` options to the interface. ``-o`` allows doctest options to
+be specified on the command line, and ``-f`` is a shorthand for ``-o
+FAIL_FAST`` (to parallel the similar option supported by the :mod:`unittest`
+CLI). (Contributed by R. David Murray in :issue:`11390`.)
 
 
 functools

@@ -93,6 +93,7 @@ __all__ = [
 ]
 
 import __future__
+import argparse
 import difflib
 import inspect
 import linecache
@@ -2708,13 +2709,30 @@ __test__ = {"_TestClass": _TestClass,
 
 
 def _test():
-    testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
-    if not testfiles:
-        name = os.path.basename(sys.argv[0])
-        if '__loader__' in globals(): # python -m
-            name, _ = os.path.splitext(name)
-        print("usage: {0} [-v] file ...".format(name))
-        return 2
+    parser = argparse.ArgumentParser(description="doctest runner")
+    parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                        help='print very verbose output for all tests')
+    parser.add_argument('-o', '--option', action='append',
+                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
+                        help=('specify a doctest option flag to apply'
+                              ' to the test run; may be specified more'
+                              ' than once to apply multiple options'))
+    parser.add_argument('-f', '--fail-fast', action='store_true',
+                        help=('stop running tests after first failure (this'
+                              ' is a shorthand for -o FAIL_FAST, and is'
+                              ' in addition to any other -o options)'))
+    parser.add_argument('file', nargs='+',
+                        help='file containing the tests to run')
+    args = parser.parse_args()
+    testfiles = args.file
+    # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
+    # but since we are using argparse we are passing it manually now.
+    verbose = args.verbose
+    options = 0
+    for option in args.option:
+        options |= OPTIONFLAGS_BY_NAME[option]
+    if args.fail_fast:
+        options |= FAIL_FAST
     for filename in testfiles:
         if filename.endswith(".py"):
             # It is a module -- insert its dir into sys.path and try to
@@ -2724,9 +2742,10 @@ def _test():
             sys.path.insert(0, dirname)
             m = __import__(filename[:-3])
             del sys.path[0]
-            failures, _ = testmod(m)
+            failures, _ = testmod(m, verbose=verbose, optionflags=options)
         else:
-            failures, _ = testfile(filename, module_relative=False)
+            failures, _ = testfile(filename, module_relative=False,
+                                   verbose=verbose, optionflags=options)
         if failures:
            return 1
     return 0

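The option handling added above boils down to looking each -o name up in doctest's OPTIONFLAGS_BY_NAME table and or'ing the flags together, with -f folding in FAIL_FAST. A standalone sketch of that logic, using hard-coded stand-ins for the parsed arguments:

    import doctest

    requested = ['ELLIPSIS', 'NORMALIZE_WHITESPACE']  # stand-in for repeated -o values
    fail_fast = True                                  # stand-in for -f

    options = 0
    for name in requested:
        # OPTIONFLAGS_BY_NAME maps each symbolic option name to its flag constant.
        options |= doctest.OPTIONFLAGS_BY_NAME[name]
    if fail_fast:
        options |= doctest.FAIL_FAST

    print(bool(options & doctest.ELLIPSIS), bool(options & doctest.FAIL_FAST))  # True True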
@@ -2596,6 +2596,232 @@ Check doctest with a non-ascii filename:
     TestResults(failed=1, attempted=1)
 """
 
+def test_CLI(): r"""
+The doctest module can be used to run doctests against an arbitrary file.
+These tests test this CLI functionality.
+
+We'll use the support module's script_helpers for this, and write a test files
+to a temp dir to run the command against.
+
+First, a file with two simple tests and no errors. We'll run both the
+unadorned doctest command, and the verbose version, and then check the output:
+
+    >>> from test import script_helper
+    >>> with script_helper.temp_dir() as tmpdir:
+    ...     fn = os.path.join(tmpdir, 'myfile.doc')
+    ...     with open(fn, 'w') as f:
+    ...         _ = f.write('This is a very simple test file.\n')
+    ...         _ = f.write(' >>> 1 + 1\n')
+    ...         _ = f.write(' 2\n')
+    ...         _ = f.write(' >>> "a"\n')
+    ...         _ = f.write(" 'a'\n")
+    ...         _ = f.write('\n')
+    ...         _ = f.write('And that is it.\n')
+    ...     rc1, out1, err1 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', fn)
+    ...     rc2, out2, err2 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-v', fn)
+
+With no arguments and passing tests, we should get no output:
+
+    >>> rc1, out1, err1
+    (0, b'', b'')
+
+With the verbose flag, we should see the test output, but no error output:
+
+    >>> rc2, err2
+    (0, b'')
+    >>> print(out2.decode())
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "a"
+    Expecting:
+        'a'
+    ok
+    1 items passed all tests:
+       2 tests in myfile.doc
+    2 tests in 1 items.
+    2 passed and 0 failed.
+    Test passed.
+    <BLANKLINE>
+
+Now we'll write a couple files, one with three tests, the other a python module
+with two tests, both of the files having "errors" in the tests that can be made
+non-errors by applying the appropriate doctest options to the run (ELLIPSIS in
+the first file, NORMALIZE_WHITESPACE in the second). This combination will
+allow to thoroughly test the -f and -o flags, as well as the doctest command's
+ability to process more than one file on the command line and, since the second
+file ends in '.py', its handling of python module files (as opposed to straight
+text files).
+
+    >>> from test import script_helper
+    >>> with script_helper.temp_dir() as tmpdir:
+    ...     fn = os.path.join(tmpdir, 'myfile.doc')
+    ...     with open(fn, 'w') as f:
+    ...         _ = f.write('This is another simple test file.\n')
+    ...         _ = f.write(' >>> 1 + 1\n')
+    ...         _ = f.write(' 2\n')
+    ...         _ = f.write(' >>> "abcdef"\n')
+    ...         _ = f.write(" 'a...f'\n")
+    ...         _ = f.write(' >>> "ajkml"\n')
+    ...         _ = f.write(" 'a...l'\n")
+    ...         _ = f.write('\n')
+    ...         _ = f.write('And that is it.\n')
+    ...     fn2 = os.path.join(tmpdir, 'myfile2.py')
+    ...     with open(fn2, 'w') as f:
+    ...         _ = f.write('def test_func():\n')
+    ...         _ = f.write(' \"\"\"\n')
+    ...         _ = f.write(' This is simple python test function.\n')
+    ...         _ = f.write(' >>> 1 + 1\n')
+    ...         _ = f.write(' 2\n')
+    ...         _ = f.write(' >>> "abc   def"\n')
+    ...         _ = f.write(" 'abc def'\n")
+    ...         _ = f.write("\n")
+    ...         _ = f.write(' \"\"\"\n')
+    ...     import shutil
+    ...     rc1, out1, err1 = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', fn, fn2)
+    ...     rc2, out2, err2 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-o', 'ELLIPSIS', fn)
+    ...     rc3, out3, err3 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-o', 'ELLIPSIS',
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
+    ...     rc4, out4, err4 = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', '-f', fn, fn2)
+    ...     rc5, out5, err5 = script_helper.assert_python_ok(
+    ...             '-m', 'doctest', '-v', '-o', 'ELLIPSIS',
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
+
+Our first test run will show the errors from the first file (doctest stops if a
+file has errors). Note that doctest test-run error output appears on stdout,
+not stderr:
+
+    >>> rc1, err1
+    (1, b'')
+    >>> print(out1.decode()) # doctest: +ELLIPSIS
+    **********************************************************************
+    File "...myfile.doc", line 4, in myfile.doc
+    Failed example:
+        "abcdef"
+    Expected:
+        'a...f'
+    Got:
+        'abcdef'
+    **********************************************************************
+    File "...myfile.doc", line 6, in myfile.doc
+    Failed example:
+        "ajkml"
+    Expected:
+        'a...l'
+    Got:
+        'ajkml'
+    **********************************************************************
+    1 items had failures:
+       2 of   3 in myfile.doc
+    ***Test Failed*** 2 failures.
+    <BLANKLINE>
+
+With -o ELLIPSIS specified, the second run, against just the first file, should
+produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither
+should the third, which ran against both files:
+
+    >>> rc2, out2, err2
+    (0, b'', b'')
+    >>> rc3, out3, err3
+    (0, b'', b'')
+
+The fourth run uses FAIL_FAST, so we should see only one error:
+
+    >>> rc4, err4
+    (1, b'')
+    >>> print(out4.decode()) # doctest: +ELLIPSIS
+    **********************************************************************
+    File "...myfile.doc", line 4, in myfile.doc
+    Failed example:
+        "abcdef"
+    Expected:
+        'a...f'
+    Got:
+        'abcdef'
+    **********************************************************************
+    1 items had failures:
+       1 of   2 in myfile.doc
+    ***Test Failed*** 1 failures.
+    <BLANKLINE>
+
+The fifth test uses verbose with the two options, so we should get verbose
+success output for the tests in both files:
+
+    >>> rc5, err5
+    (0, b'')
+    >>> print(out5.decode())
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "abcdef"
+    Expecting:
+        'a...f'
+    ok
+    Trying:
+        "ajkml"
+    Expecting:
+        'a...l'
+    ok
+    1 items passed all tests:
+       3 tests in myfile.doc
+    3 tests in 1 items.
+    3 passed and 0 failed.
+    Test passed.
+    Trying:
+        1 + 1
+    Expecting:
+        2
+    ok
+    Trying:
+        "abc   def"
+    Expecting:
+        'abc def'
+    ok
+    1 items had no tests:
+        myfile2
+    1 items passed all tests:
+       2 tests in myfile2.test_func
+    2 tests in 2 items.
+    2 passed and 0 failed.
+    Test passed.
+    <BLANKLINE>
+
+We should also check some typical error cases.
+
+Invalid file name:
+
+    >>> rc, out, err = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', 'nosuchfile')
+    >>> rc, out
+    (1, b'')
+    >>> print(err.decode()) # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+      ...
+    FileNotFoundError: [Errno ...] No such file or directory: 'nosuchfile'
+
+Invalid doctest option:
+
+    >>> rc, out, err = script_helper.assert_python_failure(
+    ...             '-m', 'doctest', '-o', 'nosuchoption')
+    >>> rc, out
+    (2, b'')
+    >>> print(err.decode()) # doctest: +ELLIPSIS
+    usage...invalid...nosuchoption...
+
+"""
+
 ######################################################################
 ## Main
 ######################################################################
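
Outside the test suite, the pattern these tests rely on can be reproduced with nothing but the standard library. This is an illustrative sketch using subprocess and tempfile instead of test.script_helper; the file name and contents are made up for the demonstration:

    import os
    import subprocess
    import sys
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, 'myfile.doc')
        with open(path, 'w') as f:
            f.write('A tiny doctest file.\n'
                    '   >>> 1 + 1\n'
                    '   2\n')
        # Run the doctest CLI in a child interpreter, as the tests above do.
        proc = subprocess.run([sys.executable, '-m', 'doctest', '-v', path],
                              capture_output=True, text=True)
        assert proc.returncode == 0            # passing tests exit with status 0
        assert 'Test passed.' in proc.stdout   # verbose run prints the summary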
@@ -129,6 +129,9 @@ Core and Builtins
 Library
 -------
 
+- Issue #11390: Add -o and -f command line options to the doctest CLI to
+  specify doctest options (and convert it to using argparse).
+
 - Issue #18135: Fix a possible integer overflow in ssl.SSLSocket.write()
   and in ssl.SSLContext.load_cert_chain() for strings and passwords longer than
   2 gigabytes.