# Module doctest.
|
|
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
|
|
# Significant enhancements by:
|
|
# Jim Fulton
|
|
# Edward Loper
|
|
|
|
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
|
|
|
|
# [XX] This docstring is out-of-date:
|
|
r"""Module doctest -- a framework for running examples in docstrings.
|
|
|
|
NORMAL USAGE
|
|
|
|
In normal use, end each module M with:
|
|
|
|
def _test():
|
|
import doctest, M # replace M with your module's name
|
|
return doctest.testmod(M) # ditto
|
|
|
|
if __name__ == "__main__":
|
|
_test()
|
|
|
|
Then running the module as a script will cause the examples in the
|
|
docstrings to get executed and verified:
|
|
|
|
python M.py
|
|
|
|
This won't display anything unless an example fails, in which case the
|
|
failing example(s) and the cause(s) of the failure(s) are printed to stdout
|
|
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
|
|
line of output is "Test failed.".
|
|
|
|
Run it with the -v switch instead:
|
|
|
|
python M.py -v
|
|
|
|
and a detailed report of all examples tried is printed to stdout, along
|
|
with assorted summaries at the end.
|
|
|
|
You can force verbose mode by passing "verbose=1" to testmod, or prohibit
|
|
it by passing "verbose=0". In either of those cases, sys.argv is not
|
|
examined by testmod.
|
|
|
|
In any case, testmod returns a 2-tuple of ints (f, t), where f is the
|
|
number of docstring examples that failed and t is the total number of
|
|
docstring examples attempted.
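
For example, a driver script can turn failures into a non-zero exit
status; a small sketch using only the documented return value:

    import doctest, M
    failures, tests = doctest.testmod(M, verbose=0)
    if failures:
        raise SystemExit(1)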
|
|
|
|
|
|
WHICH DOCSTRINGS ARE EXAMINED?
|
|
|
|
+ M.__doc__.
|
|
|
|
+ f.__doc__ for all functions f in M.__dict__.values(), except those
|
|
defined in other modules.
|
|
|
|
+ C.__doc__ for all classes C in M.__dict__.values(), except those
|
|
defined in other modules.
|
|
|
|
+ If M.__test__ exists and "is true", it must be a dict, and
|
|
each entry maps a (string) name to a function object, class object, or
|
|
string. Function and class object docstrings found from M.__test__
|
|
are searched even if the name is private, and strings are searched
|
|
directly as if they were docstrings. In output, a key K in M.__test__
|
|
appears with name
|
|
<name of M>.__test__.K
|
|
|
|
Any classes found are recursively searched similarly, to test docstrings in
|
|
their contained methods and nested classes. All names reached from
|
|
M.__test__ are searched.
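
For example, a module might end with (the names here are made up):

    __test__ = {
        "_private_helper": _private_helper,  # searched despite the leading underscore
        "extra": ">>> print 6 * 7\n42\n",    # a string, searched as if it were a docstring
    }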
|
|
|
|
Optionally, functions with private names can be skipped (unless listed in
|
|
M.__test__) by supplying a function to the "isprivate" argument that will
|
|
identify private functions. For convenience, one such function is
|
|
supplied. doctest.is_private considers a name to be private if it begins
|
|
with an underscore (like "_my_func") but doesn't both begin and end with
|
|
(at least) two underscores (like "__init__"). By supplying this function
|
|
or your own "isprivate" function to testmod, the behavior can be customized.
|
|
|
|
If you want to test docstrings in objects with private names too, stuff
|
|
them into an M.__test__ dict, or see ADVANCED USAGE below (e.g., pass your
|
|
own isprivate function to Tester's constructor, or call the rundoc method
|
|
of a Tester instance).
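
For example, to skip conventionally private names while testing module M:

    import doctest, M
    doctest.testmod(M, isprivate=doctest.is_private)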
|
|
|
|
WHAT'S THE EXECUTION CONTEXT?
|
|
|
|
By default, each time testmod finds a docstring to test, it uses a *copy*
|
|
of M's globals (so that running tests on a module doesn't change the
|
|
module's real globals, and so that one test in M can't leave behind crumbs
|
|
that accidentally allow another test to work). This means examples can
|
|
freely use any names defined at top-level in M. It also means that sloppy
|
|
imports (see above) can cause examples in external docstrings to use
|
|
globals inappropriate for them.
|
|
|
|
You can force use of your own dict as the execution context by passing
|
|
"globs=your_dict" to testmod instead. Presumably this would be a copy of
|
|
M.__dict__ merged with the globals from other imported modules.
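
For example (the helper module name is made up):

    import doctest, M, helpers
    globs = M.__dict__.copy()
    globs.update(helpers.__dict__)
    doctest.testmod(M, globs=globs)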
|
|
|
|
|
|
WHAT IF I WANT TO TEST A WHOLE PACKAGE?
|
|
|
|
Piece o' cake, provided the modules do their testing from docstrings.
|
|
Here's the test.py I use for the world's most elaborate Rational/
|
|
floating-base-conversion pkg (which I'll distribute some day):
|
|
|
|
from Rational import Cvt
|
|
from Rational import Format
|
|
from Rational import machprec
|
|
from Rational import Rat
|
|
from Rational import Round
|
|
from Rational import utils
|
|
|
|
modules = (Cvt,
|
|
Format,
|
|
machprec,
|
|
Rat,
|
|
Round,
|
|
utils)
|
|
|
|
def _test():
|
|
import doctest
|
|
import sys
|
|
verbose = "-v" in sys.argv
|
|
for mod in modules:
|
|
doctest.testmod(mod, verbose=verbose, report=0)
|
|
doctest.master.summarize()
|
|
|
|
if __name__ == "__main__":
|
|
_test()
|
|
|
|
IOW, it just runs testmod on all the pkg modules. testmod remembers the
|
|
names and outcomes (# of failures, # of tries) for each item it's seen, and
|
|
passing "report=0" prevents it from printing a summary after each module.
|
|
Instead, the summary is delayed until all modules have been tested, and
|
|
then "doctest.master.summarize()" forces the summary at the end.
|
|
|
|
So this is very nice in practice: each module can be tested individually
|
|
with almost no work beyond writing up docstring examples, and collections
|
|
of modules can be tested too as a unit with no more work than the above.
|
|
|
|
|
|
WHAT ABOUT EXCEPTIONS?
|
|
|
|
No problem, as long as the only output generated by the example is the
|
|
traceback itself. For example:
|
|
|
|
>>> [1, 2, 3].remove(42)
|
|
Traceback (most recent call last):
|
|
File "<stdin>", line 1, in ?
|
|
ValueError: list.remove(x): x not in list
|
|
>>>
|
|
|
|
Note that only the exception type and value are compared (specifically,
|
|
only the last line in the traceback).
|
|
|
|
|
|
ADVANCED USAGE
|
|
|
|
doctest.testmod() captures the testing policy I find most useful most
|
|
often. You may want other policies.
|
|
|
|
testmod() actually creates a local instance of class doctest.Tester, runs
|
|
appropriate methods of that class, and merges the results into global
|
|
Tester instance doctest.master.
|
|
|
|
You can create your own instances of doctest.Tester, and so build your own
|
|
policies, or even run methods of doctest.master directly. See
|
|
doctest.Tester.__doc__ for details.
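
A rough sketch of driving a Tester yourself:

    import doctest
    t = doctest.Tester(globs={}, verbose=1)
    t.runstring(">>> 2 + 2\n4\n", "example")
    t.summarize()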
|
|
|
|
|
|
SO WHAT DOES A DOCSTRING EXAMPLE LOOK LIKE ALREADY!?
|
|
|
|
Oh ya. It's easy! In most cases a copy-and-paste of an interactive
|
|
console session works fine -- just make sure the leading whitespace is
|
|
rigidly consistent (you can mix tabs and spaces if you're too lazy to do it
|
|
right, but doctest is not in the business of guessing what you think a tab
|
|
means).
|
|
|
|
>>> # comments are ignored
|
|
>>> x = 12
|
|
>>> x
|
|
12
|
|
>>> if x == 13:
|
|
... print "yes"
|
|
... else:
|
|
... print "no"
|
|
... print "NO"
|
|
... print "NO!!!"
|
|
...
|
|
no
|
|
NO
|
|
NO!!!
|
|
>>>
|
|
|
|
Any expected output must immediately follow the final ">>>" or "..." line
|
|
containing the code, and the expected output (if any) extends to the next
|
|
">>>" or all-whitespace line. That's it.
|
|
|
|
Bummers:
|
|
|
|
+ Expected output cannot contain an all-whitespace line, since such a line
|
|
is taken to signal the end of expected output.
|
|
|
|
+ Output to stdout is captured, but not output to stderr (exception
|
|
tracebacks are captured via a different means).
|
|
|
|
+ If you continue a line via backslashing in an interactive session,
|
|
or for any other reason use a backslash, you should use a raw
|
|
docstring, which will preserve your backslashes exactly as you type
|
|
them:
|
|
|
|
>>> def f(x):
|
|
... r'''Backslashes in a raw docstring: m\n'''
|
|
>>> print f.__doc__
|
|
Backslashes in a raw docstring: m\n
|
|
|
|
Otherwise, the backslash will be interpreted as part of the string.
|
|
E.g., the "\n" above would be interpreted as a newline character.
|
|
Alternatively, you can double each backslash in the doctest version
|
|
(and not use a raw string):
|
|
|
|
>>> def f(x):
|
|
... '''Backslashes in a raw docstring: m\\n'''
|
|
>>> print f.__doc__
|
|
Backslashes in a raw docstring: m\n
|
|
|
|
The starting column doesn't matter:
|
|
|
|
>>> assert "Easy!"
|
|
>>> import math
|
|
>>> math.floor(1.9)
|
|
1.0
|
|
|
|
and as many leading whitespace characters are stripped from the expected
|
|
output as appeared in the initial ">>>" line that triggered it.
|
|
|
|
If you execute this very file, the examples above will be found and
|
|
executed, leading to this output in verbose mode:
|
|
|
|
Running doctest.__doc__
|
|
Trying: [1, 2, 3].remove(42)
|
|
Expecting:
|
|
Traceback (most recent call last):
|
|
File "<stdin>", line 1, in ?
|
|
ValueError: list.remove(x): x not in list
|
|
ok
|
|
Trying: x = 12
|
|
Expecting: nothing
|
|
ok
|
|
Trying: x
|
|
Expecting: 12
|
|
ok
|
|
Trying:
|
|
if x == 13:
|
|
print "yes"
|
|
else:
|
|
print "no"
|
|
print "NO"
|
|
print "NO!!!"
|
|
Expecting:
|
|
no
|
|
NO
|
|
NO!!!
|
|
ok
|
|
... and a bunch more like that, with this summary at the end:
|
|
|
|
5 items had no tests:
|
|
doctest.Tester.__init__
|
|
doctest.Tester.run__test__
|
|
doctest.Tester.summarize
|
|
doctest.run_docstring_examples
|
|
doctest.testmod
|
|
12 items passed all tests:
|
|
8 tests in doctest
|
|
6 tests in doctest.Tester
|
|
10 tests in doctest.Tester.merge
|
|
14 tests in doctest.Tester.rundict
|
|
3 tests in doctest.Tester.rundoc
|
|
3 tests in doctest.Tester.runstring
|
|
2 tests in doctest.__test__._TestClass
|
|
2 tests in doctest.__test__._TestClass.__init__
|
|
2 tests in doctest.__test__._TestClass.get
|
|
1 tests in doctest.__test__._TestClass.square
|
|
2 tests in doctest.__test__.string
|
|
7 tests in doctest.is_private
|
|
60 tests in 17 items.
|
|
60 passed and 0 failed.
|
|
Test passed.
|
|
"""
|
|
|
|
__all__ = [
|
|
'is_private',
|
|
'Example',
|
|
'DocTest',
|
|
'DocTestFinder',
|
|
'DocTestRunner',
|
|
'testmod',
|
|
'run_docstring_examples',
|
|
'Tester',
|
|
'DocTestTestCase',
|
|
'DocTestSuite',
|
|
'testsource',
|
|
'debug',
|
|
# 'master',
|
|
]
|
|
|
|
import __future__
|
|
|
|
import sys, traceback, inspect, linecache, re, types
|
|
import unittest, difflib, tempfile
|
|
from StringIO import StringIO
|
|
|
|
# Option constants.
|
|
DONT_ACCEPT_TRUE_FOR_1 = 1 << 0
|
|
DONT_ACCEPT_BLANKLINE = 1 << 1
|
|
NORMALIZE_WHITESPACE = 1 << 2
|
|
ELLIPSIS = 1 << 3
|
|
UNIFIED_DIFF = 1 << 4
|
|
CONTEXT_DIFF = 1 << 5
|
|
|
|
OPTIONFLAGS_BY_NAME = {
|
|
'DONT_ACCEPT_TRUE_FOR_1': DONT_ACCEPT_TRUE_FOR_1,
|
|
'DONT_ACCEPT_BLANKLINE': DONT_ACCEPT_BLANKLINE,
|
|
'NORMALIZE_WHITESPACE': NORMALIZE_WHITESPACE,
|
|
'ELLIPSIS': ELLIPSIS,
|
|
'UNIFIED_DIFF': UNIFIED_DIFF,
|
|
'CONTEXT_DIFF': CONTEXT_DIFF,
|
|
}
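
# The flags are single-bit masks, so callers combine them with "|".
# A rough sketch (the module name M is made up):
#
#     import doctest, M
#     doctest.testmod(M, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)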
|
|
|
|
# Special string markers for use in `want` strings:
|
|
BLANKLINE_MARKER = '<BLANKLINE>'
|
|
ELLIPSIS_MARKER = '...'
|
|
|
|
######################################################################
|
|
## Table of Contents
|
|
######################################################################
|
|
# 1. Utility Functions
|
|
# 2. Example & DocTest -- store test cases
|
|
# 3. DocTest Finder -- extracts test cases from objects
|
|
# 4. DocTest Runner -- runs test cases
|
|
# 5. Test Functions -- convenient wrappers for testing
|
|
# 6. Tester Class -- for backwards compatibility
|
|
# 7. Unittest Support
|
|
# 8. Debugging Support
|
|
# 9. Example Usage
|
|
|
|
######################################################################
|
|
## 1. Utility Functions
|
|
######################################################################
|
|
|
|
def is_private(prefix, base):
|
|
"""prefix, base -> true iff name prefix + "." + base is "private".
|
|
|
|
Prefix may be an empty string, and base does not contain a period.
|
|
Prefix is ignored (although functions you write conforming to this
|
|
protocol may make use of it).
|
|
Return true iff base begins with an (at least one) underscore, but
|
|
does not both begin and end with (at least) two underscores.
|
|
|
|
>>> is_private("a.b", "my_func")
|
|
False
|
|
>>> is_private("____", "_my_func")
|
|
True
|
|
>>> is_private("someclass", "__init__")
|
|
False
|
|
>>> is_private("sometypo", "__init_")
|
|
True
|
|
>>> is_private("x.y.z", "_")
|
|
True
|
|
>>> is_private("_x.y.z", "__")
|
|
False
|
|
>>> is_private("", "") # senseless but consistent
|
|
False
|
|
"""
|
|
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
|
|
|
|
def _extract_future_flags(globs):
|
|
"""
|
|
Return the compiler-flags associated with the future features that
|
|
have been imported into the given namespace (globs).
|
|
"""
|
|
flags = 0
|
|
for fname in __future__.all_feature_names:
|
|
feature = globs.get(fname, None)
|
|
if feature is getattr(__future__, fname):
|
|
flags |= feature.compiler_flag
|
|
return flags
|
|
|
|
def _normalize_module(module, depth=2):
|
|
"""
|
|
Return the module specified by `module`. In particular:
|
|
- If `module` is a module, then return module.
|
|
- If `module` is a string, then import and return the
|
|
module with that name.
|
|
- If `module` is None, then return the calling module.
|
|
The calling module is assumed to be the module of
|
|
the stack frame at the given depth in the call stack.
|
|
"""
|
|
if inspect.ismodule(module):
|
|
return module
|
|
elif isinstance(module, (str, unicode)):
|
|
return __import__(module, globals(), locals(), ["*"])
|
|
elif module is None:
|
|
return sys.modules[sys._getframe(depth).f_globals['__name__']]
|
|
else:
|
|
raise TypeError("Expected a module, string, or None")
|
|
|
|
def _tag_msg(tag, msg, indent_msg=True):
|
|
"""
|
|
Return a string that displays a tag-and-message pair nicely,
|
|
keeping the tag and its message on the same line when that
|
|
makes sense. If `indent_msg` is true, then messages that are
|
|
put on separate lines will be indented.
|
|
"""
|
|
# What string should we use to indent contents?
|
|
INDENT = ' '
|
|
|
|
# If the message doesn't end in a newline, then add one.
|
|
if msg[-1:] != '\n':
|
|
msg += '\n'
|
|
# If the message is short enough, and contains no internal
|
|
# newlines, then display it on the same line as the tag.
|
|
# Otherwise, display the tag on its own line.
|
|
if (len(tag) + len(msg) < 75 and
|
|
msg.find('\n', 0, len(msg)-1) == -1):
|
|
return '%s: %s' % (tag, msg)
|
|
else:
|
|
if indent_msg:
|
|
msg = '\n'.join([INDENT+l for l in msg.split('\n')])
|
|
msg = msg[:-len(INDENT)]
|
|
return '%s:\n%s' % (tag, msg)
|
|
|
|
# Override some StringIO methods.
|
|
class _SpoofOut(StringIO):
|
|
def getvalue(self):
|
|
result = StringIO.getvalue(self)
|
|
# If anything at all was written, make sure there's a trailing
|
|
# newline. There's no way for the expected output to indicate
|
|
# that a trailing newline is missing.
|
|
if result and not result.endswith("\n"):
|
|
result += "\n"
|
|
# Prevent softspace from screwing up the next test case, in
|
|
# case they used print with a trailing comma in an example.
|
|
if hasattr(self, "softspace"):
|
|
del self.softspace
|
|
return result
|
|
|
|
def truncate(self, size=None):
|
|
StringIO.truncate(self, size)
|
|
if hasattr(self, "softspace"):
|
|
del self.softspace
|
|
|
|
######################################################################
|
|
## 2. Example & DocTest
|
|
######################################################################
|
|
## - An "example" is a <source, want> pair, where "source" is a
|
|
## fragment of source code, and "want" is the expected output for
|
|
## "source." The Example class also includes information about
|
|
## where the example was extracted from.
|
|
##
|
|
## - A "doctest" is a collection of examples extracted from a string
|
|
## (such as an object's docstring). The DocTest class also includes
|
|
## information about where the string was extracted from.
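##
## A rough sketch of building a DocTest directly from a string of
## examples, using only the classes defined below:
##
##     t = DocTest(">>> 2 ** 5\n32\n", {}, "demo", None, 0)
##     print t                        # <DocTest demo from None:0 (1 example)>
##     print t.examples[0].source     # 2 ** 5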
|
|
|
|
class Example:
|
|
"""
|
|
A single doctest example, consisting of source code and expected
|
|
output. Example defines the following attributes:
|
|
|
|
- source: The source code that should be run. It ends with a
|
|
newline iff the source spans more than one line.
|
|
|
|
- want: The expected output from running the source code. If
|
|
not empty, then this string ends with a newline.
|
|
|
|
- lineno: The line number within the DocTest string containing
|
|
this Example where the Example begins. This line number is
|
|
zero-based, with respect to the beginning of the DocTest.
|
|
"""
|
|
def __init__(self, source, want, lineno):
|
|
# Check invariants.
|
|
if (source[-1:] == '\n') != ('\n' in source[:-1]):
|
|
raise AssertionError("source must end with newline iff "
|
|
"source contains more than one line")
|
|
if want and want[-1] != '\n':
|
|
raise AssertionError("non-empty want must end with newline")
|
|
# Store properties.
|
|
self.source = source
|
|
self.want = want
|
|
self.lineno = lineno
|
|
|
|
class DocTest:
|
|
"""
|
|
A collection of doctest examples that should be run in a single
|
|
namespace. Each DocTest defines the following attributes:
|
|
|
|
- examples: the list of examples.
|
|
|
|
- globs: The namespace (aka globals) that the examples should
|
|
be run in.
|
|
|
|
- name: A name identifying the DocTest (typically, the name of
|
|
the object whose docstring this DocTest was extracted from).
|
|
|
|
- filename: The name of the file that this DocTest was extracted
|
|
from.
|
|
|
|
- lineno: The line number within filename where this DocTest
|
|
begins. This line number is zero-based, with respect to the
|
|
beginning of the file.
|
|
"""
|
|
def __init__(self, docstring, globs, name, filename, lineno):
|
|
"""
|
|
Create a new DocTest, by extracting examples from `docstring`.
|
|
The DocTest's globals are initialized with a copy of `globs`.
|
|
"""
|
|
# Store a copy of the globals
|
|
self.globs = globs.copy()
|
|
# Store identifying information
|
|
self.name = name
|
|
self.filename = filename
|
|
self.lineno = lineno
|
|
# Parse the docstring.
|
|
self.examples = self._parse(docstring)
|
|
|
|
_PS1 = ">>>"
|
|
_PS2 = "..."
|
|
_isPS1 = re.compile(r"(\s*)" + re.escape(_PS1)).match
|
|
_isPS2 = re.compile(r"(\s*)" + re.escape(_PS2)).match
|
|
_isEmpty = re.compile(r"\s*$").match
|
|
_isComment = re.compile(r"\s*#").match
|
|
|
|
def _parse(self, string):
|
|
if not string.endswith('\n'):
|
|
string += '\n'
|
|
examples = []
|
|
isPS1, isPS2 = self._isPS1, self._isPS2
|
|
isEmpty, isComment = self._isEmpty, self._isComment
|
|
lines = string.split("\n")
|
|
i, n = 0, len(lines)
|
|
while i < n:
|
|
# Search for an example (a PS1 line).
|
|
line = lines[i]
|
|
i += 1
|
|
m = isPS1(line)
|
|
if m is None:
|
|
continue
|
|
# line is a PS1 line.
|
|
j = m.end(0) # beyond the prompt
|
|
if isEmpty(line, j) or isComment(line, j):
|
|
# a bare prompt or comment -- not interesting
|
|
continue
|
|
# line is a non-trivial PS1 line.
|
|
lineno = i - 1
|
|
if line[j] != " ":
|
|
raise ValueError('line %r of the docstring for %s lacks '
|
|
'blank after %s: %r' %
|
|
(lineno, self.name, self._PS1, line))
|
|
|
|
j += 1
|
|
blanks = m.group(1)
|
|
nblanks = len(blanks)
|
|
# suck up this and following PS2 lines
|
|
source = []
|
|
while 1:
|
|
source.append(line[j:])
|
|
line = lines[i]
|
|
m = isPS2(line)
|
|
if m:
|
|
if m.group(1) != blanks:
|
|
raise ValueError('line %r of the docstring for %s '
|
|
'has inconsistent leading whitespace: %r' %
|
|
(i, self.name, line))
|
|
i += 1
|
|
else:
|
|
break
|
|
# get rid of useless null line from trailing empty "..."
|
|
if source[-1] == "":
|
|
assert len(source) > 1
|
|
del source[-1]
|
|
if len(source) == 1:
|
|
source = source[0]
|
|
else:
|
|
source = "\n".join(source) + "\n"
|
|
# suck up response
|
|
if isPS1(line) or isEmpty(line):
|
|
want = ""
|
|
else:
|
|
want = []
|
|
while 1:
|
|
if line[:nblanks] != blanks:
|
|
raise ValueError('line %r of the docstring for %s '
|
|
'has inconsistent leading whitespace: %r' %
|
|
(i, self.name, line))
|
|
want.append(line[nblanks:])
|
|
i += 1
|
|
line = lines[i]
|
|
if isPS1(line) or isEmpty(line):
|
|
break
|
|
want = "\n".join(want) + "\n"
|
|
examples.append(Example(source, want, lineno))
|
|
return examples
|
|
|
|
def __repr__(self):
|
|
if len(self.examples) == 0:
|
|
examples = 'no examples'
|
|
elif len(self.examples) == 1:
|
|
examples = '1 example'
|
|
else:
|
|
examples = '%d examples' % len(self.examples)
|
|
return ('<DocTest %s from %s:%s (%s)>' %
|
|
(self.name, self.filename, self.lineno, examples))
|
|
|
|
|
|
# This lets us sort tests by name:
|
|
def __cmp__(self, other):
|
|
if not isinstance(other, DocTest):
|
|
return -1
|
|
return cmp((self.name, self.filename, self.lineno, id(self)),
|
|
(other.name, other.filename, other.lineno, id(other)))
|
|
|
|
######################################################################
|
|
## 3. DocTest Finder
|
|
######################################################################
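
# A rough usage sketch for the finder (the module name `mymod` is made up):
#
#     finder = DocTestFinder()
#     for test in finder.find(mymod):
#         print test.name, len(test.examples)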
|
|
|
|
class DocTestFinder:
|
|
"""
|
|
A class used to extract the DocTests that are relevant to a given
|
|
object, from its docstring and the docstrings of its contained
|
|
objects. Doctests can currently be extracted from the following
|
|
object types: modules, functions, classes, methods, staticmethods,
|
|
classmethods, and properties.
|
|
|
|
An optional name filter and an optional object filter may be
|
|
passed to the constructor, to restrict which contained objects are
|
|
examined by the doctest finder:
|
|
|
|
- The name filter is a function `f(prefix, base)`, that returns
|
|
true if an object named `prefix.base` should be ignored.
|
|
- The object filter is a function `f(obj)` that returns true
|
|
if the given object should be ignored.
|
|
|
|
Each object is ignored if either filter function returns true for
|
|
that object. These filter functions are applied when examining
|
|
the contents of a module or of a class, but not when examining a
|
|
module's `__test__` dictionary. By default, no objects are
|
|
ignored.
|
|
"""
|
|
|
|
def __init__(self, verbose=False, namefilter=None, objfilter=None,
|
|
recurse=True):
|
|
"""
|
|
Create a new doctest finder.
|
|
|
|
If the optional argument `recurse` is false, then `find` will
|
|
only examine the given object, and not any contained objects.
|
|
"""
|
|
self._verbose = verbose
|
|
self._namefilter = namefilter
|
|
self._objfilter = objfilter
|
|
self._recurse = recurse
|
|
|
|
def find(self, obj, name=None, module=None, globs=None,
|
|
extraglobs=None, ignore_imports=True):
|
|
"""
|
|
Return a list of the DocTests that are defined by the given
|
|
object's docstring, or by any of its contained objects'
|
|
docstrings.
|
|
|
|
The optional parameter `module` is the module that contains
|
|
the given object. If the module is not specified, then the
|
|
test finder will attempt to automatically determine the
|
|
correct module. The object's module is used:
|
|
|
|
- As a default namespace, if `globs` is not specified.
|
|
- To prevent the DocTestFinder from extracting DocTests
|
|
from objects that are imported from other modules
|
|
(as long as `ignore_imports` is true).
|
|
- To find the name of the file containing the object.
|
|
- To help find the line number of the object within its
|
|
file.
|
|
|
|
The globals for each DocTest is formed by combining `globs`
|
|
and `extraglobs` (bindings in `extraglobs` override bindings
|
|
in `globs`). A new copy of the globals dictionary is created
|
|
for each DocTest. If `globs` is not specified, then it
|
|
defaults to the module's `__dict__`, if specified, or {}
|
|
otherwise. If `extraglobs` is not specified, then it defaults
|
|
to {}.
|
|
|
|
If the optional flag `ignore_imports` is true, then the
|
|
doctest finder will ignore any contained objects whose module
|
|
does not match `module`. Otherwise, it will extract tests
|
|
from all contained objects, including imported objects.
|
|
"""
|
|
# If name was not specified, then extract it from the object.
|
|
if name is None:
|
|
name = getattr(obj, '__name__', None)
|
|
if name is None:
|
|
raise ValueError("DocTestFinder.find: name must be given "
|
|
"when obj.__name__ doesn't exist: %r" %
|
|
(type(obj),))
|
|
|
|
# Find the module that contains the given object (if obj is
|
|
# a module, then module=obj.). Note: this may fail, in which
|
|
# case module will be None.
|
|
if module is None:
|
|
module = inspect.getmodule(obj)
|
|
|
|
# Read the module's source code. This is used by
|
|
# DocTestFinder._find_lineno to find the line number for a
|
|
# given object's docstring.
|
|
try:
|
|
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
|
|
source_lines = linecache.getlines(file)
|
|
if not source_lines:
|
|
source_lines = None
|
|
except TypeError:
|
|
source_lines = None
|
|
|
|
# Initialize globals, and merge in extraglobs.
|
|
if globs is None:
|
|
if module is None:
|
|
globs = {}
|
|
else:
|
|
globs = module.__dict__.copy()
|
|
else:
|
|
globs = globs.copy()
|
|
if extraglobs is not None:
|
|
globs.update(extraglobs)
|
|
|
|
# Recursively explore `obj`, extracting DocTests.
|
|
tests = []
|
|
self._find(tests, obj, name, module, source_lines,
|
|
globs, ignore_imports, {})
|
|
return tests
|
|
|
|
def _filter(self, obj, prefix, base):
|
|
"""
|
|
Return true if the given object should not be examined.
|
|
"""
|
|
return ((self._namefilter is not None and
|
|
self._namefilter(prefix, base)) or
|
|
(self._objfilter is not None and
|
|
self._objfilter(obj)))
|
|
|
|
def _from_module(self, module, object):
|
|
"""
|
|
Return true if the given object is defined in the given
|
|
module.
|
|
"""
|
|
if module is None:
|
|
return True
|
|
elif inspect.isfunction(object):
|
|
return module.__dict__ is object.func_globals
|
|
elif inspect.isclass(object):
|
|
return module.__name__ == object.__module__
|
|
elif inspect.getmodule(object) is not None:
|
|
return module is inspect.getmodule(object)
|
|
elif hasattr(object, '__module__'):
|
|
return module.__name__ == object.__module__
|
|
elif isinstance(object, property):
|
|
return True # [XX] no way to be sure.
|
|
else:
|
|
raise ValueError("object must be a class or function")
|
|
|
|
def _find(self, tests, obj, name, module, source_lines,
|
|
globs, ignore_imports, seen):
|
|
"""
|
|
Find tests for the given object and any contained objects, and
|
|
add them to `tests`.
|
|
"""
|
|
if self._verbose:
|
|
print 'Finding tests in %s' % name
|
|
|
|
# If we've already processed this object, then ignore it.
|
|
if id(obj) in seen:
|
|
return
|
|
seen[id(obj)] = 1
|
|
|
|
# Find a test for this object, and add it to the list of tests.
|
|
test = self._get_test(obj, name, module, globs, source_lines)
|
|
if test is not None:
|
|
tests.append(test)
|
|
|
|
# Look for tests in a module's contained objects.
|
|
if inspect.ismodule(obj) and self._recurse:
|
|
for valname, val in obj.__dict__.items():
|
|
# Check if this contained object should be ignored.
|
|
if self._filter(val, name, valname):
|
|
continue
|
|
valname = '%s.%s' % (name, valname)
|
|
# Recurse to functions & classes.
|
|
if ((inspect.isfunction(val) or inspect.isclass(val)) and
|
|
(self._from_module(module, val) or not ignore_imports)):
|
|
self._find(tests, val, valname, module, source_lines,
|
|
globs, ignore_imports, seen)
|
|
|
|
# Look for tests in a module's __test__ dictionary.
|
|
if inspect.ismodule(obj) and self._recurse:
|
|
for valname, val in getattr(obj, '__test__', {}).items():
|
|
if not isinstance(valname, basestring):
|
|
raise ValueError("DocTestFinder.find: __test__ keys "
|
|
"must be strings: %r" %
|
|
(type(valname),))
|
|
if not (inspect.isfunction(val) or inspect.isclass(val) or
|
|
inspect.ismethod(val) or inspect.ismodule(val) or
|
|
isinstance(val, basestring)):
|
|
raise ValueError("DocTestFinder.find: __test__ values "
|
|
"must be strings, functions, methods, "
|
|
"classes, or modules: %r" %
|
|
(type(val),))
|
|
valname = '%s.%s' % (name, valname)
|
|
self._find(tests, val, valname, module, source_lines,
|
|
globs, ignore_imports, seen)
|
|
|
|
# Look for tests in a class's contained objects.
|
|
if inspect.isclass(obj) and self._recurse:
|
|
for valname, val in obj.__dict__.items():
|
|
# Check if this contained object should be ignored.
|
|
if self._filter(val, name, valname):
|
|
continue
|
|
# Special handling for staticmethod/classmethod.
|
|
if isinstance(val, staticmethod):
|
|
val = getattr(obj, valname)
|
|
if isinstance(val, classmethod):
|
|
val = getattr(obj, valname).im_func
|
|
|
|
# Recurse to methods, properties, and nested classes.
|
|
if ((inspect.isfunction(val) or inspect.isclass(val) or
|
|
isinstance(val, property)) and
|
|
(self._from_module(module, val) or not ignore_imports)):
|
|
valname = '%s.%s' % (name, valname)
|
|
self._find(tests, val, valname, module, source_lines,
|
|
globs, ignore_imports, seen)
|
|
|
|
def _get_test(self, obj, name, module, globs, source_lines):
|
|
"""
|
|
Return a DocTest for the given object, if it defines a docstring;
|
|
otherwise, return None.
|
|
"""
|
|
# Extract the object's docstring. If it doesn't have one,
|
|
# then return None (no test for this object).
|
|
if isinstance(obj, basestring):
|
|
docstring = obj
|
|
else:
|
|
try:
|
|
if obj.__doc__ is None:
|
|
return None
|
|
docstring = str(obj.__doc__)
|
|
except (TypeError, AttributeError):
|
|
return None
|
|
|
|
# Don't bother if the docstring is empty.
|
|
if not docstring:
|
|
return None
|
|
|
|
# Find the docstring's location in the file.
|
|
lineno = self._find_lineno(obj, source_lines)
|
|
|
|
# Return a DocTest for this object.
|
|
if module is None:
|
|
filename = None
|
|
else:
|
|
filename = getattr(module, '__file__', module.__name__)
|
|
return DocTest(docstring, globs, name, filename, lineno)
|
|
|
|
def _find_lineno(self, obj, source_lines):
|
|
"""
|
|
Return a line number of the given object's docstring. Note:
|
|
this method assumes that the object has a docstring.
|
|
"""
|
|
lineno = None
|
|
|
|
# Find the line number for modules.
|
|
if inspect.ismodule(obj):
|
|
lineno = 0
|
|
|
|
# Find the line number for classes.
|
|
# Note: this could be fooled if a class is defined multiple
|
|
# times in a single file.
|
|
if inspect.isclass(obj):
|
|
if source_lines is None:
|
|
return None
|
|
pat = re.compile(r'^\s*class\s*%s\b' %
|
|
getattr(obj, '__name__', '-'))
|
|
for i, line in enumerate(source_lines):
|
|
if pat.match(line):
|
|
lineno = i
|
|
break
|
|
|
|
# Find the line number for functions & methods.
|
|
if inspect.ismethod(obj): obj = obj.im_func
|
|
if inspect.isfunction(obj): obj = obj.func_code
|
|
if inspect.istraceback(obj): obj = obj.tb_frame
|
|
if inspect.isframe(obj): obj = obj.f_code
|
|
if inspect.iscode(obj):
|
|
lineno = getattr(obj, 'co_firstlineno', None)-1
|
|
|
|
# Find the line number where the docstring starts. Assume
|
|
# that it's the first line that begins with a quote mark.
|
|
# Note: this could be fooled by a multiline function
|
|
# signature, where a continuation line begins with a quote
|
|
# mark.
|
|
if lineno is not None:
|
|
if source_lines is None:
|
|
return lineno+1
|
|
pat = re.compile('(^|.*:)\s*\w*("|\')')
|
|
for lineno in range(lineno, len(source_lines)):
|
|
if pat.match(source_lines[lineno]):
|
|
return lineno
|
|
|
|
# We couldn't find the line number.
|
|
return None
|
|
|
|
######################################################################
|
|
## 4. DocTest Runner
|
|
######################################################################
|
|
|
|
# [XX] Should overridable methods (eg DocTestRunner.check_output) be
|
|
# named with a leading underscore?
|
|
|
|
class DocTestRunner:
|
|
"""
|
|
A class used to run DocTest test cases, and accumulate statistics.
|
|
The `run` method is used to process a single DocTest case. It
|
|
returns a tuple `(f, t)`, where `t` is the number of test cases
|
|
tried, and `f` is the number of test cases that failed.
|
|
|
|
>>> tests = DocTestFinder().find(_TestClass)
|
|
>>> runner = DocTestRunner(verbose=False)
|
|
>>> for test in tests:
|
|
... print runner.run(test)
|
|
(0, 2)
|
|
(0, 1)
|
|
(0, 2)
|
|
(0, 2)
|
|
|
|
The `summarize` method prints a summary of all the test cases that
|
|
have been run by the runner, and returns an aggregated `(f, t)`
|
|
tuple:
|
|
|
|
>>> runner.summarize(verbose=1)
|
|
4 items passed all tests:
|
|
2 tests in _TestClass
|
|
2 tests in _TestClass.__init__
|
|
2 tests in _TestClass.get
|
|
1 tests in _TestClass.square
|
|
7 tests in 4 items.
|
|
7 passed and 0 failed.
|
|
Test passed.
|
|
(0, 7)
|
|
|
|
The aggregated number of tried examples and failed examples is
|
|
also available via the `tries` and `failures` attributes:
|
|
|
|
>>> runner.tries
|
|
7
|
|
>>> runner.failures
|
|
0
|
|
|
|
The comparison between expected outputs and actual outputs is done
|
|
by the `check_output` method. This comparison may be customized
|
|
with a number of option flags; see the documentation for `testmod`
|
|
for more information. If the option flags are insufficient, then
|
|
the comparison may also be customized by subclassing
|
|
DocTestRunner, and overriding the methods `check_output` and
|
|
`output_difference`.
|
|
|
|
The test runner's display output can be controlled in two ways.
|
|
First, an output function (`out`) can be passed to
`DocTestRunner.run`; this function will be called with strings that
|
|
should be displayed. It defaults to `sys.stdout.write`. If
|
|
capturing the output is not sufficient, then the display output
|
|
can be also customized by subclassing DocTestRunner, and
|
|
overriding the methods `report_start`, `report_success`,
|
|
`report_unexpected_exception`, and `report_failure`.
|
|
"""
|
|
# This divider string is used to separate failure messages, and to
|
|
# separate sections of the summary.
|
|
DIVIDER = "*" * 70
|
|
|
|
def __init__(self, verbose=None, optionflags=0):
|
|
"""
|
|
Create a new test runner.
|
|
|
|
Optional keyword arg 'verbose' prints lots of stuff if true,
|
|
only failures if false; by default, it's true iff '-v' is in
|
|
sys.argv.
|
|
|
|
Optional argument `optionflags` can be used to control how the
|
|
test runner compares expected output to actual output, and how
|
|
it displays failures. See the documentation for `testmod` for
|
|
more information.
|
|
"""
|
|
if verbose is None:
|
|
verbose = '-v' in sys.argv
|
|
self._verbose = verbose
|
|
self.optionflags = optionflags
|
|
|
|
# Keep track of the examples we've run.
|
|
self.tries = 0
|
|
self.failures = 0
|
|
self._name2ft = {}
|
|
|
|
# Create a fake output target for capturing doctest output.
|
|
self._fakeout = _SpoofOut()
|
|
|
|
#/////////////////////////////////////////////////////////////////
|
|
# Output verification methods
|
|
#/////////////////////////////////////////////////////////////////
|
|
# These two methods should be updated together, since the
|
|
# output_difference method needs to know what should be considered
|
|
# to match by check_output.
|
|
|
|
def check_output(self, want, got):
|
|
"""
|
|
Return True iff the actual output (`got`) matches the expected
|
|
output (`want`). These strings are always considered to match
|
|
if they are identical; but depending on what option flags the
|
|
test runner is using, several non-exact match types are also
|
|
possible. See the documentation for `DocTestRunner` for more
|
|
information about option flags.
|
|
"""
|
|
# Handle the common case first, for efficiency:
|
|
# if they're string-identical, always return true.
|
|
if got == want:
|
|
return True
|
|
|
|
# The values True and False replaced 1 and 0 as the return
|
|
# value for boolean comparisons in Python 2.3.
|
|
if not (self.optionflags & DONT_ACCEPT_TRUE_FOR_1):
|
|
if (got,want) == ("True\n", "1\n"):
|
|
return True
|
|
if (got,want) == ("False\n", "0\n"):
|
|
return True
|
|
|
|
# <BLANKLINE> can be used as a special sequence to signify a
|
|
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
|
|
if not (self.optionflags & DONT_ACCEPT_BLANKLINE):
|
|
# Replace <BLANKLINE> in want with a blank line.
|
|
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
|
|
'', want)
|
|
# If a line in got contains only spaces, then remove the
|
|
# spaces.
|
|
got = re.sub('(?m)^\s*?$', '', got)
|
|
if got == want:
|
|
return True
|
|
|
|
# This flag causes doctest to ignore any differences in the
|
|
# contents of whitespace strings. Note that this can be used
|
|
# in conjunction with the ELLIPSIS flag.
|
|
if (self.optionflags & NORMALIZE_WHITESPACE):
|
|
got = ' '.join(got.split())
|
|
want = ' '.join(want.split())
|
|
if got == want:
|
|
return True
|
|
|
|
# The ELLIPSIS flag says to let the sequence "..." in `want`
|
|
# match any substring in `got`. We implement this by
|
|
# transforming `want` into a regular expression.
|
|
if (self.optionflags & ELLIPSIS):
|
|
# Escape any special regexp characters
|
|
want_re = re.escape(want)
|
|
# Replace ellipsis markers ('...') with .*
|
|
want_re = want_re.replace(re.escape(ELLIPSIS_MARKER), '.*')
|
|
# Require that it matches the entire string; and set the
|
|
# re.DOTALL flag (with '(?s)').
|
|
want_re = '(?s)^%s$' % want_re
|
|
# Check if the `want_re` regexp matches got.
|
|
if re.match(want_re, got):
|
|
return True
|
|
|
|
# We didn't find any match; return false.
|
|
return False
|
|
|
|
def output_difference(self, want, got):
|
|
"""
|
|
Return a string describing the differences between the
|
|
expected output (`want`) and the actual output (`got`).
|
|
"""
|
|
# If <BLANKLINE>s are being used, then replace <BLANKLINE>
|
|
# with blank lines in the expected output string.
|
|
if not (self.optionflags & DONT_ACCEPT_BLANKLINE):
|
|
want = re.sub('(?m)^%s$' % re.escape(BLANKLINE_MARKER), '', want)
|
|
|
|
# Check if we should use diff. Don't use diff if the actual
|
|
# or expected outputs are too short, or if the expected output
|
|
# contains an ellipsis marker.
|
|
if ((self.optionflags & (UNIFIED_DIFF | CONTEXT_DIFF)) and
|
|
want.count('\n') > 2 and got.count('\n') > 2 and
|
|
not (self.optionflags & ELLIPSIS and '...' in want)):
|
|
# Split want & got into lines.
|
|
want_lines = [l+'\n' for l in want.split('\n')]
|
|
got_lines = [l+'\n' for l in got.split('\n')]
|
|
# Use difflib to find their differences.
|
|
if self.optionflags & UNIFIED_DIFF:
|
|
diff = difflib.unified_diff(want_lines, got_lines, n=2,
|
|
fromfile='Expected', tofile='Got')
|
|
kind = 'unified'
|
|
elif self.optionflags & CONTEXT_DIFF:
|
|
diff = difflib.context_diff(want_lines, got_lines, n=2,
|
|
fromfile='Expected', tofile='Got')
|
|
kind = 'context'
|
|
else:
|
|
assert 0, 'Bad diff option'
|
|
# Remove trailing whitespace on diff output.
|
|
diff = [line.rstrip() + '\n' for line in diff]
|
|
return _tag_msg("Differences (" + kind + " diff)",
|
|
''.join(diff))
|
|
|
|
# If we're not using diff, then simply list the expected
|
|
# output followed by the actual output.
|
|
return (_tag_msg("Expected", want or "Nothing") +
|
|
_tag_msg("Got", got))
|
|
|
|
#/////////////////////////////////////////////////////////////////
|
|
# Reporting methods
|
|
#/////////////////////////////////////////////////////////////////
|
|
|
|
def report_start(self, out, test, example):
|
|
"""
|
|
Report that the test runner is about to process the given
|
|
example. (Only displays a message if verbose=True)
|
|
"""
|
|
if self._verbose:
|
|
out(_tag_msg("Trying", example.source) +
|
|
_tag_msg("Expecting", example.want or "nothing"))
|
|
|
|
def report_success(self, out, test, example, got):
|
|
"""
|
|
Report that the given example ran successfully. (Only
|
|
displays a message if verbose=True)
|
|
"""
|
|
if self._verbose:
|
|
out("ok\n")
|
|
|
|
def report_failure(self, out, test, example, got):
|
|
"""
|
|
Report that the given example failed.
|
|
"""
|
|
# Print an error message.
|
|
out(self.__failure_header(test, example) +
|
|
self.output_difference(example.want, got))
|
|
|
|
def report_unexpected_exception(self, out, test, example, exc_info):
|
|
"""
|
|
Report that the given example raised an unexpected exception.
|
|
"""
|
|
# Get a traceback message.
|
|
excout = StringIO()
|
|
exc_type, exc_val, exc_tb = exc_info
|
|
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
|
|
exception_tb = excout.getvalue()
|
|
# Print an error message.
|
|
out(self.__failure_header(test, example) +
|
|
_tag_msg("Exception raised", exception_tb))
|
|
|
|
def __failure_header(self, test, example):
|
|
s = (self.DIVIDER + "\n" +
|
|
_tag_msg("Failure in example", example.source))
|
|
if test.filename is None:
|
|
# [XX] I'm not putting +1 here, to give the same output
|
|
# as the old version. But I think it *should* go here.
|
|
return s + ("from line #%s of %s\n" %
|
|
(example.lineno, test.name))
|
|
elif test.lineno is None:
|
|
return s + ("from line #%s of %s in %s\n" %
|
|
(example.lineno+1, test.name, test.filename))
|
|
else:
|
|
lineno = test.lineno+example.lineno+1
|
|
return s + ("from line #%s of %s (%s)\n" %
|
|
(lineno, test.filename, test.name))
|
|
|
|
#/////////////////////////////////////////////////////////////////
|
|
# DocTest Running
|
|
#/////////////////////////////////////////////////////////////////
|
|
|
|
# A regular expression for handling `want` strings that contain
|
|
# expected exceptions. It divides `want` into two pieces: the
|
|
# pre-exception output (`out`) and the exception message (`exc`),
|
|
# as generated by traceback.format_exception_only(). (I assume
|
|
# that the exception_only message is the first non-indented line
|
|
# starting with word characters after the "Traceback ...".)
|
|
_EXCEPTION_RE = re.compile(('^(?P<out>.*)'
|
|
'^(?P<hdr>Traceback \((?:%s|%s)\):)\s*$.*?'
|
|
'^(?P<exc>\w+.*)') %
|
|
('most recent call last', 'innermost last'),
|
|
re.MULTILINE | re.DOTALL)
|
|
|
|
_OPTION_DIRECTIVE_RE = re.compile('\s*doctest:\s*(?P<flags>[^#\n]*)')
|
|
|
|
def __handle_directive(self, example):
|
|
"""
|
|
Check if the given example is actually a directive to doctest
|
|
(to turn an optionflag on or off); and if it is, then handle
|
|
the directive.
|
|
|
|
Return true iff the example is actually a directive (and so
|
|
should not be executed).
|
|
|
|
"""
|
|
m = self._OPTION_DIRECTIVE_RE.match(example.source)
|
|
if m is None:
|
|
return False
|
|
|
|
for flag in m.group('flags').upper().split():
|
|
if (flag[:1] not in '+-' or
|
|
flag[1:] not in OPTIONFLAGS_BY_NAME):
|
|
raise ValueError('Bad doctest option directive: '+flag)
|
|
if flag[0] == '+':
|
|
self.optionflags |= OPTIONFLAGS_BY_NAME[flag[1:]]
|
|
else:
|
|
self.optionflags &= ~OPTIONFLAGS_BY_NAME[flag[1:]]
|
|
return True
|
|
|
|
def __run(self, test, compileflags, out):
|
|
"""
|
|
Run the examples in `test`. Write the outcome of each example
|
|
with one of the `DocTestRunner.report_*` methods, using the
|
|
writer function `out`. `compileflags` is the set of compiler
|
|
flags that should be used to execute examples. Return a tuple
|
|
`(f, t)`, where `t` is the number of examples tried, and `f`
|
|
is the number of examples that failed. The examples are run
|
|
in the namespace `test.globs`.
|
|
"""
|
|
# Keep track of the number of failures and tries.
|
|
failures = tries = 0
|
|
|
|
# Save the option flags (since option directives can be used
|
|
# to modify them).
|
|
original_optionflags = self.optionflags
|
|
|
|
# Process each example.
|
|
for example in test.examples:
|
|
# Check if it's an option directive. If it is, then handle
|
|
# it, and go on to the next example.
|
|
if self.__handle_directive(example):
|
|
continue
|
|
|
|
# Record that we started this example.
|
|
tries += 1
|
|
self.report_start(out, test, example)
|
|
|
|
# Run the example in the given context (globs), and record
|
|
# any exception that gets raised. (But don't intercept
|
|
# keyboard interrupts.)
|
|
try:
|
|
# If the example is a compound statement on one line,
|
|
# like "if 1: print 2", then compile() requires a
|
|
# trailing newline. Rather than analyze that, always
|
|
# append one (it never hurts).
|
|
exec compile(example.source + '\n', "<string>", "single",
|
|
compileflags, 1) in test.globs
|
|
exception = None
|
|
except KeyboardInterrupt:
|
|
raise
|
|
except:
|
|
exception = sys.exc_info()
|
|
|
|
# Extract the example's actual output from fakeout, and
|
|
# write it to `got`. Add a terminating newline if it
|
|
# doesn't already have one.
|
|
got = self._fakeout.getvalue()
|
|
self._fakeout.truncate(0)
|
|
|
|
# If the example executed without raising any exceptions,
|
|
# then verify its output and report its outcome.
|
|
if exception is None:
|
|
if self.check_output(example.want, got):
|
|
self.report_success(out, test, example, got)
|
|
else:
|
|
self.report_failure(out, test, example, got)
|
|
failures += 1
|
|
|
|
# If the example raised an exception, then check if it was
|
|
# expected.
|
|
else:
|
|
exc_info = sys.exc_info()
|
|
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
|
|
|
|
# Search the `want` string for an exception. If we don't
|
|
# find one, then report an unexpected exception.
|
|
m = self._EXCEPTION_RE.match(example.want)
|
|
if m is None:
|
|
self.report_unexpected_exception(out, test, example,
|
|
exc_info)
|
|
failures += 1
|
|
else:
|
|
exc_hdr = m.group('hdr')+'\n' # Exception header
|
|
# The test passes iff the pre-exception output and
|
|
# the exception description match the values given
|
|
# in `want`.
|
|
if (self.check_output(m.group('out'), got) and
|
|
self.check_output(m.group('exc'), exc_msg)):
|
|
# Is +exc_msg the right thing here??
|
|
self.report_success(out, test, example,
|
|
got+exc_hdr+exc_msg)
|
|
else:
|
|
self.report_failure(out, test, example,
|
|
got+exc_hdr+exc_msg)
|
|
failures += 1
|
|
|
|
# Restore the option flags (in case they were modified)
|
|
self.optionflags = original_optionflags
|
|
|
|
# Record and return the number of failures and tries.
|
|
self.__record_outcome(test, failures, tries)
|
|
return failures, tries
|
|
|
|
def __record_outcome(self, test, f, t):
|
|
"""
|
|
Record the fact that the given DocTest (`test`) generated `f`
|
|
failures out of `t` tried examples.
|
|
"""
|
|
f2, t2 = self._name2ft.get(test.name, (0,0))
|
|
self._name2ft[test.name] = (f+f2, t+t2)
|
|
self.failures += f
|
|
self.tries += t
|
|
|
|
def run(self, test, compileflags=None, out=None, clear_globs=True):
|
|
"""
|
|
Run the examples in `test`, and display the results using the
|
|
writer function `out`.
|
|
|
|
The examples are run in the namespace `test.globs`. If
|
|
`clear_globs` is true (the default), then this namespace will
|
|
be cleared after the test runs, to help with garbage
|
|
collection. If you would like to examine the namespace after
|
|
the test completes, then use `clear_globs=False`.
|
|
|
|
`compileflags` gives the set of flags that should be used by
|
|
the Python compiler when running the examples. If not
|
|
specified, then it will default to the set of future-import
|
|
flags that apply to `globs`.
|
|
|
|
The output of each example is checked using
|
|
`DocTestRunner.check_output`, and the results are formatted by
|
|
the `DocTestRunner.report_*` methods.
|
|
"""
|
|
if compileflags is None:
|
|
compileflags = _extract_future_flags(test.globs)
|
|
if out is None:
|
|
out = sys.stdout.write
|
|
saveout = sys.stdout
|
|
|
|
try:
|
|
sys.stdout = self._fakeout
|
|
return self.__run(test, compileflags, out)
|
|
finally:
|
|
sys.stdout = saveout
|
|
# While Python gc can clean up most cycles on its own, it doesn't
|
|
# chase frame objects. This is especially irksome when running
|
|
# generator tests that raise exceptions, because a named generator-
|
|
# iterator gets an entry in globs, and the generator-iterator
|
|
# object's frame's traceback info points back to globs. This is
|
|
# easy to break just by clearing the namespace. This can also
|
|
# help to break other kinds of cycles, and even for cycles that
|
|
# gc can break itself it's better to break them ASAP.
|
|
if clear_globs:
|
|
test.globs.clear()
|
|
|
|
#/////////////////////////////////////////////////////////////////
|
|
# Summarization
|
|
#/////////////////////////////////////////////////////////////////
|
|
def summarize(self, verbose=None):
|
|
"""
|
|
Print a summary of all the test cases that have been run by
|
|
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
|
|
the total number of failed examples, and `t` is the total
|
|
number of tried examples.
|
|
|
|
The optional `verbose` argument controls how detailed the
|
|
summary is. If the verbosity is not specified, then the
|
|
DocTestRunner's verbosity is used.
|
|
"""
|
|
if verbose is None:
|
|
verbose = self._verbose
|
|
notests = []
|
|
passed = []
|
|
failed = []
|
|
totalt = totalf = 0
|
|
for x in self._name2ft.items():
|
|
name, (f, t) = x
|
|
assert f <= t
|
|
totalt += t
|
|
totalf += f
|
|
if t == 0:
|
|
notests.append(name)
|
|
elif f == 0:
|
|
passed.append( (name, t) )
|
|
else:
|
|
failed.append(x)
|
|
if verbose:
|
|
if notests:
|
|
print len(notests), "items had no tests:"
|
|
notests.sort()
|
|
for thing in notests:
|
|
print " ", thing
|
|
if passed:
|
|
print len(passed), "items passed all tests:"
|
|
passed.sort()
|
|
for thing, count in passed:
|
|
print " %3d tests in %s" % (count, thing)
|
|
if failed:
|
|
print self.DIVIDER
|
|
print len(failed), "items had failures:"
|
|
failed.sort()
|
|
for thing, (f, t) in failed:
|
|
print " %3d of %3d in %s" % (f, t, thing)
|
|
if verbose:
|
|
print totalt, "tests in", len(self._name2ft), "items."
|
|
print totalt - totalf, "passed and", totalf, "failed."
|
|
if totalf:
|
|
print "***Test Failed***", totalf, "failures."
|
|
elif verbose:
|
|
print "Test passed."
|
|
return totalf, totalt
|
|
|
|
######################################################################
|
|
## 5. Test Functions
|
|
######################################################################
|
|
# These should be backwards compatible.
|
|
|
|
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
|
|
report=True, optionflags=0, extraglobs=None):
|
|
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
|
|
report=True, optionflags=0, extraglobs=None
|
|
|
|
Test examples in docstrings in functions and classes reachable
|
|
from module m (or the current module if m is not supplied), starting
|
|
with m.__doc__. Unless isprivate is specified, private names
|
|
are not skipped.
|
|
|
|
Also test examples reachable from dict m.__test__ if it exists and is
|
|
not None. m.__test__ maps names to functions, classes and strings;
|
|
function and class docstrings are tested even if the name is private;
|
|
strings are tested directly, as if they were docstrings.
|
|
|
|
Return (#failures, #tests).
|
|
|
|
See doctest.__doc__ for an overview.
|
|
|
|
Optional keyword arg "name" gives the name of the module; by default
|
|
use m.__name__.
|
|
|
|
Optional keyword arg "globs" gives a dict to be used as the globals
|
|
when executing examples; by default, use m.__dict__. A copy of this
|
|
dict is actually used for each docstring, so that each docstring's
|
|
examples start with a clean slate.
|
|
|
|
Optional keyword arg "extraglobs" gives a dictionary that should be
|
|
merged into the globals that are used to execute examples. By
|
|
default, no extra globals are used. This is new in 2.4.
|
|
|
|
Optional keyword arg "verbose" prints lots of stuff if true, prints
|
|
only failures if false; by default, it's true iff "-v" is in sys.argv.
|
|
|
|
Optional keyword arg "isprivate" specifies a function used to
|
|
determine whether a name is private. The default is to treat
all functions as public. Optionally, "isprivate" can be
|
|
set to doctest.is_private to skip over functions marked as private
|
|
using the underscore naming convention; see its docs for details.
|
|
|
|
Optional keyword arg "report" prints a summary at the end when true,
|
|
else prints nothing at the end. In verbose mode, the summary is
|
|
detailed, else very brief (in fact, empty if all tests passed).
|
|
|
|
Optional keyword arg "optionflags" or's together module constants,
|
|
and defaults to 0. This is new in 2.3. Possible values:
|
|
|
|
DONT_ACCEPT_TRUE_FOR_1
|
|
By default, if an expected output block contains just "1",
|
|
an actual output block containing just "True" is considered
|
|
to be a match, and similarly for "0" versus "False". When
|
|
DONT_ACCEPT_TRUE_FOR_1 is specified, neither substitution
|
|
is allowed.
|
|
|
|
DONT_ACCEPT_BLANKLINE
|
|
By default, if an expected output block contains a line
|
|
containing only the string "<BLANKLINE>", then that line
|
|
will match a blank line in the actual output. When
|
|
DONT_ACCEPT_BLANKLINE is specified, this substitution is
|
|
not allowed.
|
|
|
|
NORMALIZE_WHITESPACE
|
|
When NORMALIZE_WHITESPACE is specified, all sequences of
|
|
whitespace are treated as equal. I.e., any sequence of
|
|
whitespace within the expected output will match any
|
|
sequence of whitespace within the actual output.
|
|
|
|
ELLIPSIS
|
|
When ELLIPSIS is specified, then an ellipsis marker
|
|
("...") in the expected output can match any substring in
|
|
the actual output.
|
|
|
|
UNIFIED_DIFF
|
|
When UNIFIED_DIFF is specified, failures that involve
|
|
multi-line expected and actual outputs will be displayed
|
|
using a unified diff.
|
|
|
|
CONTEXT_DIFF
|
|
When CONTEXT_DIFF is specified, failures that involve
|
|
multi-line expected and actual outputs will be displayed
|
|
using a context diff.
|
|
"""
|
|
|
|
""" [XX] This is no longer true:
|
|
Advanced tomfoolery: testmod runs methods of a local instance of
|
|
class doctest.Tester, then merges the results into (or creates)
|
|
global Tester instance doctest.master. Methods of doctest.master
|
|
can be called directly too, if you want to do something unusual.
|
|
Passing report=0 to testmod is especially useful then, to delay
|
|
displaying a summary. Invoke doctest.master.summarize(verbose)
|
|
when you're done fiddling.
|
|
"""
|
|
# If no module was given, then use __main__.
|
|
if m is None:
|
|
# DWA - m will still be None if this wasn't invoked from the command
|
|
# line, in which case the following TypeError is about as good an error
|
|
# as we should expect
|
|
m = sys.modules.get('__main__')
|
|
|
|
# Check that we were actually given a module.
|
|
if not inspect.ismodule(m):
|
|
raise TypeError("testmod: module required; %r" % (m,))
|
|
|
|
# If no name was given, then use the module's name.
|
|
if name is None:
|
|
name = m.__name__
|
|
|
|
# Find, parse, and run all tests in the given module.
|
|
finder = DocTestFinder(namefilter=isprivate)
|
|
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
|
|
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
|
|
runner.run(test)
|
|
|
|
if report:
|
|
runner.summarize()
|
|
|
|
return runner.failures, runner.tries
|
|
|
|
def run_docstring_examples(f, globs, verbose=False, name="NoName",
|
|
compileflags=None, optionflags=0):
|
|
"""
|
|
Test examples in the given object's docstring (`f`), using `globs`
|
|
as globals. Optional argument `name` is used in failure messages.
|
|
If the optional argument `verbose` is true, then generate output
|
|
even if there are no failures.
|
|
|
|
`compileflags` gives the set of flags that should be used by the
|
|
Python compiler when running the examples. If not specified, then
|
|
it will default to the set of future-import flags that apply to
|
|
`globs`.
|
|
|
|
Optional keyword arg `optionflags` specifies options for the
|
|
testing and output. See the documentation for `testmod` for more
|
|
information.
|
|
"""
|
|
# Find, parse, and run all tests in the given module.
|
|
finder = DocTestFinder(verbose=verbose, recurse=False)
|
|
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
|
|
for test in finder.find(f, name, globs=globs):
|
|
runner.run(test, compileflags=compileflags)
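
# A rough usage sketch for run_docstring_examples:
#
#     def square(x):
#         """
#         >>> square(3)
#         9
#         """
#         return x * x
#
#     run_docstring_examples(square, {'square': square}, name='square')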
|
|
|
|
######################################################################
|
|
## 6. Tester
|
|
######################################################################
|
|
# This is provided only for backwards compatibility. It's not
|
|
# actually used in any way.
|
|
|
|
class Tester:
|
|
def __init__(self, mod=None, globs=None, verbose=None,
|
|
isprivate=None, optionflags=0):
|
|
if mod is None and globs is None:
|
|
raise TypeError("Tester.__init__: must specify mod or globs")
|
|
if mod is not None and not inspect.ismodule(mod):
|
|
raise TypeError("Tester.__init__: mod must be a module; %r" %
|
|
(mod,))
|
|
if globs is None:
|
|
globs = mod.__dict__
|
|
self.globs = globs
|
|
|
|
self.verbose = verbose
|
|
self.isprivate = isprivate
|
|
self.optionflags = optionflags
|
|
self.testfinder = DocTestFinder(namefilter=isprivate)
|
|
self.testrunner = DocTestRunner(verbose=verbose,
|
|
optionflags=optionflags)
|
|
|
|
def runstring(self, s, name):
|
|
test = DocTest(s, self.globs, name, None, None)
|
|
if self.verbose:
|
|
print "Running string", name
|
|
(f,t) = self.testrunner.run(test)
|
|
if self.verbose:
|
|
print f, "of", t, "examples failed in string", name
|
|
return (f,t)
|
|
|
|
def rundoc(self, object, name=None, module=None, ignore_imports=True):
|
|
f = t = 0
|
|
tests = self.testfinder.find(object, name, module=module,
|
|
globs=self.globs,
|
|
ignore_imports=ignore_imports)
|
|
for test in tests:
|
|
(f2, t2) = self.testrunner.run(test)
|
|
(f,t) = (f+f2, t+t2)
|
|
return (f,t)
|
|
|
|
def rundict(self, d, name, module=None):
|
|
import new
|
|
m = new.module(name)
|
|
m.__dict__.update(d)
|
|
ignore_imports = (module is not None)
|
|
return self.rundoc(m, name, module, ignore_imports)
|
|
|
|
def run__test__(self, d, name):
|
|
import new
|
|
m = new.module(name)
|
|
m.__test__ = d
|
|
return self.rundoc(m, name, module)
|
|
|
|
def summarize(self, verbose=None):
|
|
return self.testrunner.summarize(verbose)
|
|
|
|
def merge(self, other):
|
|
d = self.testrunner._name2ft
|
|
for name, (f, t) in other.testrunner._name2ft.items():
|
|
if name in d:
|
|
print "*** Tester.merge: '" + name + "' in both" \
|
|
" testers; summing outcomes."
|
|
f2, t2 = d[name]
|
|
f = f + f2
|
|
t = t + t2
|
|
d[name] = f, t
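
# Illustrative sketch of the legacy Tester interface kept above for
# backwards compatibility; test1() and test2() near the end of this module
# exercise the same calls with expected output.  The helper name is an
# addition for illustration only and is never called at import time.
def _example_legacy_tester():
    t = Tester(globs={'x': 42}, verbose=0)
    # runstring treats the string as a docstring full of examples and
    # returns a (failures, tries) pair.
    failed, tried = t.runstring(">>> x + 1\n43\n", "legacy-example")
    # summarize() prints per-item counts and overall totals.
    t.summarize(verbose=0)
    return failed, tried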

######################################################################
## 7. Unittest Support
######################################################################

class DocTestTestCase(unittest.TestCase):
    """A unittest test case that wraps a single DocTest.

    Running the test case runs the doctest with the given test runner;
    the case fails if any example in the doctest fails.  Optionally,
    set-up and tidy-up functions can be supplied.  As with TestCase,
    the tidy-up ('tearDown') function will always be called if the
    set-up ('setUp') function ran successfully.
    """

    def __init__(self, test_runner, test,
                 setUp=None, tearDown=None):
        unittest.TestCase.__init__(self)
        self.__test_runner = test_runner
        self.__test = test
        self.__setUp = setUp
        self.__tearDown = tearDown

    def setUp(self):
        if self.__setUp is not None:
            self.__setUp()

    def tearDown(self):
        if self.__tearDown is not None:
            self.__tearDown()

    def runTest(self):
        test = self.__test
        old = sys.stdout
        new = StringIO()
        try:
            self.__test_runner.DIVIDER = "-"*70
            failures, tries = self.__test_runner.run(test, out=new.write)
        finally:
            sys.stdout = old

        if failures:
            lname = '.'.join(test.name.split('.')[-1:])
            if test.lineno is None:
                lineno = 'unknown line number'
            else:
                lineno = 'line %s' % test.lineno
            err = new.getvalue()

            raise self.failureException(
                'Failed doctest test for %s\n'
                '  File "%s", %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err))

    def id(self):
        return self.__test.name

    def __repr__(self):
        name = self.__test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self.__test.name


def DocTestSuite(module=None, filename=None, globs=None, extraglobs=None,
                 optionflags=0,
                 test_finder=None, test_runner=None,
                 setUp=lambda: None, tearDown=lambda: None):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each docstring in the module that contains doctest
    tests into a unittest test case.  If any of the tests in a
    docstring fail, then the test case fails.  An exception is raised
    showing the name of the file containing the test and a (sometimes
    approximate) line number.

    The `module` argument provides the module to be tested.  The
    argument can be either a module or a module name.

    If no argument is given, the calling module is used.
    """
    if module is not None and filename is not None:
        raise ValueError('Specify module or filename, not both.')

    if test_finder is None:
        test_finder = DocTestFinder()
    if test_runner is None:
        test_runner = DocTestRunner(optionflags=optionflags)

    if filename is not None:
        name = os.path.basename(filename)
        if globs is None:
            globs = {}
        # Treat the file's entire contents as a single doctest "docstring".
        tests = [DocTest(open(filename).read(), globs, name, filename, 0)]
    else:
        module = _normalize_module(module)
        tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
        if globs is None:
            globs = module.__dict__
        if not tests: # [XX] why do we want to do this?
            raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if len(test.examples) == 0: continue
        if not test.filename:
            filename = module.__file__
            if filename.endswith(".pyc"):
                filename = filename[:-1]
            elif filename.endswith(".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestTestCase(test_runner, test,
                                      setUp, tearDown))

    return suite
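
# A short sketch (for illustration; the helper name is an addition) of
# wiring a module's doctests into a unittest run, as described in
# DocTestSuite's docstring above.  Nothing here runs at import time.
def _example_doctest_suite():
    # One unittest test case is built per docstring that contains examples;
    # per the docstring above, a module name (a string) works as well as a
    # module object.
    suite = DocTestSuite('doctest')
    runner = unittest.TextTestRunner(verbosity=1)
    return runner.run(suite)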

######################################################################
## 8. Debugging Support
######################################################################

def _want_comment(example):
    """
    Return a comment containing the expected output for the given
    example.
    """
    # Return the expected output, if any
    want = example.want
    if want:
        if want[-1] == '\n': want = want[:-1]
        want = "\n# ".join(want.split("\n"))
        want = "\n# Expected:\n# %s" % want
    return want

def testsource(module, name):
    """Extract the examples from a doctest docstring, as a script.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    whose docstring contains the tests to be extracted.
    """
    module = _normalize_module(module)
    tests = DocTestFinder().find(module)
    test = [t for t in tests if t.name == name]
    if not test:
        raise ValueError(name, "not found in tests")
    test = test[0]
    testsrc = '\n'.join([
        "%s%s" % (example.source, _want_comment(example))
        for example in test.examples
        ])
    return testsrc
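
# Illustrative sketch (the helper name is an addition, and the dotted test
# name is an assumption about how the finder names tests): pulling the
# examples out of _TestClass's docstring as a plain script, with expected
# output turned into comments by _want_comment above.
def _example_testsource():
    import doctest
    # Test names are dotted and rooted at the module name, so _TestClass's
    # docstring should show up under 'doctest._TestClass'.
    return testsource(doctest, 'doctest._TestClass')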

def debug_src(src, pm=False, globs=None):
    """Debug a single docstring containing doctests.

    The docstring containing the tests is provided directly as `src`.
    """
    test = DocTest(src, globs or {}, 'debug', None, None)

    testsrc = '\n'.join([
        "%s%s" % (example.source, _want_comment(example))
        for example in test.examples
        ])
    debug_script(testsrc, pm, globs)

def debug_script(src, pm=False, globs=None):
    "Debug a test script.  `src` is the script, as a string."
    import pdb

    srcfilename = tempfile.mktemp("doctestdebug.py")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()

    if globs:
        globs = globs.copy()
    else:
        globs = {}

    if pm:
        try:
            execfile(srcfilename, globs, globs)
        except:
            print sys.exc_info()[1]
            pdb.post_mortem(sys.exc_info()[2])
    else:
        # Note that %r is vital here.  '%s' instead can, e.g., cause
        # backslashes to get treated as metacharacters on Windows.
        pdb.run("execfile(%r)" % srcfilename, globs, globs)

def debug(module, name, pm=False):
    """Debug a single docstring containing doctests.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    whose docstring contains the tests to be debugged.
    """
    module = _normalize_module(module)
    testsrc = testsource(module, name)
    debug_script(testsrc, pm, module.__dict__)
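
# A minimal sketch (for illustration only; never called at import time) of
# the debugging helpers above: debug_src() runs a docstring given as a
# string under pdb, and with pm=True it post-mortems the first failure
# instead of single-stepping from the top.
def _example_debug_src():
    # Two examples; the second raises KeyError, so pdb.post_mortem() starts
    # at the point of failure.  Interactive: pdb takes over the terminal.
    debug_src(">>> grades = {'ada': 91}\n>>> grades['alan']\n", pm=True)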

######################################################################
## 9. Example Usage
######################################################################

class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.

    Methods:
        square()
        get()

    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """

    def __init__(self, val):
        """val -> _TestClass object with associated value val.

        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """

        self.val = val

    def square(self):
        """square() -> square TestClass's associated value

        >>> _TestClass(13).square().get()
        169
        """

        self.val = self.val ** 2
        return self

    def get(self):
        """get() -> return TestClass's associated value.

        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """

        return self.val

__test__ = {"_TestClass": _TestClass,
            "string": r"""
                      Example of a string object, searched as-is.
                      >>> x = 1; y = 2
                      >>> x + y, x * y
                      (3, 2)
                      """,
            "bool-int equivalence": r"""
                                    In 2.2, boolean expressions displayed
                                    0 or 1.  By default, we still accept
                                    them.  This can be disabled by passing
                                    DONT_ACCEPT_TRUE_FOR_1 to the new
                                    optionflags argument.
                                    >>> 4 == 4
                                    1
                                    >>> 4 == 4
                                    True
                                    >>> 4 > 4
                                    0
                                    >>> 4 > 4
                                    False
                                    """,
            "blank lines": r"""
            Blank lines can be marked with <BLANKLINE>:
                >>> print 'foo\n\nbar\n'
                foo
                <BLANKLINE>
                bar
                <BLANKLINE>
            """,
           }
#             "ellipsis": r"""
#             If the ellipsis flag is used, then '...' can be used to
#             elide substrings in the desired output:
#                 >>> print range(1000)
#                 [0, 1, 2, ..., 999]
#             """,
#             "whitespace normalization": r"""
#             If the whitespace normalization flag is used, then
#             differences in whitespace are ignored.
#                 >>> print range(30)
#                 [0, 1, 2,  3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
#                  15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
#                  27, 28, 29]
#             """,
#            }

def test1(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={'x': 42}, verbose=0)
>>> t.runstring(r'''
...      >>> x = x * 2
...      >>> print x
...      42
... ''', 'XYZ')
**********************************************************************
Failure in example: print x
from line #2 of XYZ
Expected: 42
Got: 84
(1, 2)
>>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2')
(0, 2)
>>> t.summarize()
**********************************************************************
1 items had failures:
    1 of   2 in XYZ
***Test Failed*** 1 failures.
(1, 4)
>>> t.summarize(verbose=1)
1 items passed all tests:
   2 tests in example2
**********************************************************************
1 items had failures:
    1 of   2 in XYZ
4 tests in 2 items.
3 passed and 1 failed.
***Test Failed*** 1 failures.
(1, 4)
"""

def test2(): r"""
        >>> t = Tester(globs={}, verbose=1)
        >>> test = r'''
        ...    # just an example
        ...    >>> x = 1 + 2
        ...    >>> x
        ...    3
        ... '''
        >>> t.runstring(test, "Example")
        Running string Example
        Trying: x = 1 + 2
        Expecting: nothing
        ok
        Trying: x
        Expecting: 3
        ok
        0 of 2 examples failed in string Example
        (0, 2)
"""
def test3(): r"""
        >>> t = Tester(globs={}, verbose=0)
        >>> def _f():
        ...     '''Trivial docstring example.
        ...     >>> assert 2 == 2
        ...     '''
        ...     return 32
        ...
        >>> t.rundoc(_f)  # expect 0 failures in 1 example
        (0, 1)
"""
def test4(): """
        >>> import new
        >>> m1 = new.module('_m1')
        >>> m2 = new.module('_m2')
        >>> test_data = \"""
        ... def _f():
        ...     '''>>> assert 1 == 1
        ...     '''
        ... def g():
        ...    '''>>> assert 2 != 1
        ...    '''
        ... class H:
        ...    '''>>> assert 2 > 1
        ...    '''
        ...    def bar(self):
        ...        '''>>> assert 1 < 2
        ...        '''
        ... \"""
        >>> exec test_data in m1.__dict__
        >>> exec test_data in m2.__dict__
        >>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})

        Tests that objects outside m1 are excluded:

        >>> t = Tester(globs={}, verbose=0, isprivate=is_private)
        >>> t.rundict(m1.__dict__, "rundict_test", m1)  # _f, f2 and g2 and h2 skipped
        (0, 3)

        Again, but with the default isprivate function allowing _f:

        >>> t = Tester(globs={}, verbose=0)
        >>> t.rundict(m1.__dict__, "rundict_test_pvt", m1)  # Only f2, g2 and h2 skipped
        (0, 4)

        And once more, not excluding stuff outside m1:

        >>> t = Tester(globs={}, verbose=0)
        >>> t.rundict(m1.__dict__, "rundict_test_pvt")  # None are skipped.
        (0, 8)

        The exclusion of objects from outside the designated module is
        meant to be invoked automagically by testmod.

        >>> testmod(m1, isprivate=is_private, verbose=False)
        (0, 3)
        """

def _test():
    #import doctest
    #doctest.testmod(doctest, verbose=False,
    #                optionflags=ELLIPSIS | NORMALIZE_WHITESPACE |
    #                            UNIFIED_DIFF)
    #print '~'*70
    r = unittest.TextTestRunner()
    r.run(DocTestSuite())

if __name__ == "__main__":
    _test()