+++++++++++++++++++++++++++++++
Writing Python Regression Tests
+++++++++++++++++++++++++++++++

:Author: Skip Montanaro
:Contact: skip@pobox.com

Introduction
============

If you add a new module to Python or modify the functionality of an existing
module, you should write one or more test cases to exercise that new
functionality.  There are different ways to do this within the regression
testing facility provided with Python; any particular test should use only
one of these options.  Each option requires writing a test module using the
conventions of the selected option:

- unittest_ based tests
- doctest_ based tests
- "traditional" Python test modules

Regardless of the mechanics of the testing approach you choose, you will be
writing unit tests (isolated tests of functions and objects defined by the
module) using white box techniques.  Unlike black box testing, where you
only have the external interfaces to guide your test case writing, in white
box testing you can see the code being tested and tailor your test cases to
exercise it more completely.  In particular, you will be able to refer to
the C and Python code in the CVS repository when writing your regression
test cases.

.. _unittest: http://www.python.org/doc/current/lib/module-unittest.html
.. _doctest: http://www.python.org/doc/current/lib/module-doctest.html

unittest-based tests
--------------------

The unittest_ framework is based on the ideas of unit testing as espoused
by Kent Beck and the `Extreme Programming`_ (XP) movement.  The specific
interface provided by the framework is tightly based on the JUnit_ Java
implementation of Beck's original SmallTalk test framework.  Please see
the documentation of the unittest_ module for detailed information on the
interface and general guidelines on writing unittest-based tests.

The test_support helper module provides a function for use by
unittest-based tests in the Python regression testing framework,
``run_unittest()``.  This is the primary way of running tests in the
standard library.  You can pass it any number of the following:

- classes derived from or instances of ``unittest.TestCase`` or
  ``unittest.TestSuite``.  These will be handed off to unittest for
  converting into a proper TestSuite instance.

- a string; this must be a key in sys.modules.  The module associated with
  that string will be scanned by ``unittest.TestLoader.loadTestsFromModule``.
  This is usually seen as ``test_support.run_unittest(__name__)`` in a test
  module's ``test_main()`` function.  This has the advantage of picking up
  new tests automatically, without you having to add each new test case
  manually.  Both calling styles are sketched just below.
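For concreteness, the two calling styles look like this in a test module's
``test_main()`` function (a minimal sketch; ``MyTestCase1`` and
``MyTestCase2`` are placeholder names, not real test classes)::

    def test_main():
        # Pass the TestCase classes explicitly ...
        test_support.run_unittest(MyTestCase1,
                                  MyTestCase2)

or::

    def test_main():
        # ... or pass the module name and let unittest scan the module.
        test_support.run_unittest(__name__)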
All test methods in the Python regression framework have names that start
with "``test_``" and use lower-case names with words separated with
underscores.

Test methods should *not* have docstrings!  The unittest module prints the
docstring if there is one, but otherwise prints the function name and the
full class name.  When there's a problem with a test, the latter
information makes it easier to find the source for the test than the
docstring.

All unittest-based tests in the Python test suite use boilerplate that
looks like this (with minor variations)::

    import unittest
    from test import test_support

    class MyTestCase1(unittest.TestCase):

        # Define setUp and tearDown only if needed

        def setUp(self):
            unittest.TestCase.setUp(self)
            ... additional initialization ...

        def tearDown(self):
            ... additional finalization ...
            unittest.TestCase.tearDown(self)

        def test_feature_one(self):
            # Testing feature one
            ... unit test for feature one ...

        def test_feature_two(self):
            # Testing feature two
            ... unit test for feature two ...

        ... etc ...

    class MyTestCase2(unittest.TestCase):
        ... same structure as MyTestCase1 ...

    ... etc ...

    def test_main():
        test_support.run_unittest(__name__)

    if __name__ == "__main__":
        test_main()

This has the advantage that it allows the unittest module to be used as a
script to run individual tests as well as working well with the regrtest
framework.

.. _Extreme Programming: http://www.extremeprogramming.org/
.. _JUnit: http://www.junit.org/

doctest based tests
-------------------

Tests written to use doctest_ are actually part of the docstrings for the
module being tested.  Each test is written as a display of an interactive
session, including the Python prompts, statements that would be typed by
the user, and the output of those statements (including tracebacks,
although only the exception msg needs to be retained then).  The module in
the test package is simply a wrapper that causes doctest to run over the
tests in the module.  The test for the difflib module provides a
convenient example::

    import difflib
    from test import test_support
    test_support.run_doctest(difflib)

If the test is successful, nothing is written to stdout (so you should not
create a corresponding output/test_difflib file), but running regrtest
with -v will give a detailed report, the same as if passing -v to doctest.

A second argument can be passed to run_doctest to tell doctest to search
``sys.argv`` for -v instead of using test_support's idea of verbosity.
This is useful for writing doctest-based tests that aren't simply running
a doctest'ed Lib module, but contain the doctests themselves.  Then at
times you may want to run such a test directly as a doctest, independent
of the regrtest framework.  The tail end of test_descrtut.py is a good
example::

    def test_main(verbose=None):
        from test import test_support, test_descrtut
        test_support.run_doctest(test_descrtut, verbose)

    if __name__ == "__main__":
        test_main(1)

If run via regrtest, ``test_main()`` is called (by regrtest) without
specifying verbose, and then test_support's idea of verbosity is used.
But when run directly, ``test_main(1)`` is called, and then doctest's idea
of verbosity is used.

See the documentation for the doctest module for information on writing
tests using the doctest framework.

"traditional" Python test modules
---------------------------------

The mechanics of how the "traditional" test system operates are fairly
straightforward.  When a test case is run, the output is compared with the
expected output that is stored in .../Lib/test/output.  If the test runs
to completion and the actual and expected outputs match, the test
succeeds, if not, it fails.  If an ``ImportError`` or
``test_support.TestSkipped`` error is raised, the test is not run.
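To make the mechanics concrete, here is a minimal sketch of such a classic
test (the ``spam`` module and its ``eggs()`` function are hypothetical;
real tests are considerably larger)::

    # Lib/test/test_spam.py -- an output-comparing "traditional" test (sketch).
    from test.test_support import TestFailed

    import spam                 # an ImportError here means the test is not run

    print("spam.eggs")          # compared against the expected-output file
    if spam.eggs() != "eggs":
        raise TestFailed("spam.eggs() returned the wrong value")

The printed output would be checked against .../Lib/test/output/test_spam,
generated as described in the next section.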
Executing Test Cases
====================

If you are writing test cases for module spam, you need to create a file
in .../Lib/test named test_spam.py.  In addition, if the tests are
expected to write to stdout during a successful run, you also need to
create an expected output file in .../Lib/test/output named test_spam
("..." represents the top-level directory in the Python source tree, the
directory containing the configure script).  If needed, generate the
initial version of the test output file by executing::

    ./python Lib/test/regrtest.py -g test_spam.py

from the top-level directory.

Any time you modify test_spam.py you need to generate a new expected
output file.  Don't forget to desk check the generated output to make
sure it's really what you expected to find!  All in all it's usually
better not to have an expected-output file (note that doctest- and
unittest-based tests do not).

To run a single test after modifying a module, simply run regrtest.py
without the -g flag::

    ./python Lib/test/regrtest.py test_spam.py

While debugging a regression test, you can of course execute it
independently of the regression testing framework and see what it
prints::

    ./python Lib/test/test_spam.py

To run the entire test suite:

- [UNIX, + other platforms where "make" works] Make the "test" target at
  the top level::

    make test

- [WINDOWS] Run rt.bat from your PCBuild directory.  Read the comments at
  the top of rt.bat for the use of special -d, -O and -q options processed
  by rt.bat.

- [OTHER] You can simply execute the two runs of regrtest (optimized and
  non-optimized) directly::

    ./python Lib/test/regrtest.py
    ./python -O Lib/test/regrtest.py

  But note that this way picks up whatever .pyc and .pyo files happen to
  be around.  The makefile and rt.bat ways run the tests twice, the first
  time removing all .pyc and .pyo files from the subtree rooted at Lib/.

Test cases generate output based upon values computed by the test code.
When executed, regrtest.py compares the actual output generated by
executing the test case with the expected output and reports success or
failure.  It stands to reason that if the actual and expected outputs are
to match, they must not contain any machine dependencies.  This means
your test cases should not print out absolute machine addresses (e.g. the
return value of the id() builtin function) or floating point numbers with
large numbers of significant digits (unless you understand what you are
doing!).

Test Case Writing Tips
======================

Writing good test cases is a skilled task and is too complex to discuss in
detail in this short document.  Many books have been written on the
subject.  I'll show my age by suggesting that Glenford Myers' `"The Art of
Software Testing"`_, published in 1979, is still the best introduction to
the subject available.  It is short (177 pages), easy to read, and
discusses the major elements of software testing, though its publication
predates the object-oriented software revolution, so doesn't cover that
subject at all.  Unfortunately, it is very expensive (about $100 new).  If
you can borrow it or find it used (around $20), I strongly urge you to
pick up a copy.

The most important goal when writing test cases is to break things.  A
test case that doesn't uncover a bug is much less valuable than one that
does.  In designing test cases you should pay attention to the following
(a short sketch after this list illustrates several of these points):

* Your test cases should exercise all the functions and objects defined
  in the module, not just the ones meant to be called by users of your
  module.  This may require you to write test code that uses the module
  in ways you don't expect (explicitly calling internal functions, for
  example - see test_atexit.py).

* You should consider any boundary values that may tickle exceptional
  conditions (e.g. if you were writing regression tests for division,
  you might well want to generate tests with numerators and denominators
  at the limits of floating point and integer numbers on the machine
  performing the tests, as well as a denominator of zero).

* You should exercise as many paths through the code as possible.  This
  may not always be possible, but is a goal to strive for.  In
  particular, when considering if statements (or their equivalent), you
  want to create test cases that exercise both the true and false
  branches.  For loops, you should create test cases that exercise the
  loop zero, one and multiple times.

* You should test with obviously invalid input.  If you know that a
  function requires an integer input, try calling it with other types of
  objects to see how it responds.

* You should test with obviously out-of-range input.  If the domain of a
  function is only defined for positive integers, try calling it with a
  negative integer.

* If you are going to fix a bug that wasn't uncovered by an existing
  test, try to write a test case that exposes the bug (preferably before
  fixing it).

* If you need to create a temporary file, you can use the filename in
  ``test_support.TESTFN`` to do so.  It is important to remove the file
  when done; other tests should be able to use the name without cleaning
  up after your test.

.. _"The Art of Software Testing":
   http://www.amazon.com/exec/obidos/ISBN=0471043281
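As one small illustration of the boundary-value and invalid-input advice
above, a unittest-based sketch for the division example might look like
this (``DivisionBoundaryTest`` is an invented name)::

    import unittest
    from test import test_support

    class DivisionBoundaryTest(unittest.TestCase):

        def test_zero_denominator(self):
            # Boundary value: a zero denominator must raise, not return junk.
            self.assertRaises(ZeroDivisionError, lambda: 1 / 0)

        def test_large_operands(self):
            # Boundary values near the top of the floating point range.
            self.assertEqual(1e308 / 1e308, 1.0)

        def test_invalid_input(self):
            # Obviously invalid input: a string is not a number.
            self.assertRaises(TypeError, lambda: 1 / "spam")

    def test_main():
        test_support.run_unittest(DivisionBoundaryTest)

    if __name__ == "__main__":
        test_main()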
Regression Test Writing Rules
=============================

Each test case is different.  There is no "standard" form for a Python
regression test case, though there are some general rules (note that
these mostly apply only to the "classic" tests; unittest_- and doctest_-
based tests should follow the conventions natural to those frameworks):

* If your test case detects a failure, raise ``TestFailed`` (found in
  ``test.test_support``).

* Import everything you'll need as early as possible.

* If you'll be importing objects from a module that is at least partially
  platform-dependent, only import those objects you need for the current
  test case to avoid spurious ``ImportError`` exceptions that prevent the
  test from running to completion.

* Print all your test case results using the ``print`` statement.  For
  non-fatal errors, print an error message (or omit a successful
  completion print) to indicate the failure, but proceed instead of
  raising ``TestFailed``.

* Use ``assert`` sparingly, if at all.  It's usually better to just print
  what you got, and rely on regrtest's got-vs-expected comparison to
  catch deviations from what you expect.  ``assert`` statements aren't
  executed at all when regrtest is run in -O mode; and, because they
  cause the test to stop immediately, they can lead to a long & tedious
  test-fix, test-fix, test-fix, ... cycle when things are badly broken
  (and note that "badly broken" often includes running the test suite
  for the first time on new platforms or under new implementations of
  the language).

Miscellaneous
=============

There is a test_support module in the test package you can import for
your test case.  Import this module using either::

    import test.test_support

or::

    from test import test_support

test_support provides the following useful objects (a short sketch after
this list shows ``TESTFN`` and ``verify`` in use):

* ``TestFailed`` - raise this exception when your regression test detects
  a failure.

* ``TestSkipped`` - raise this if the test could not be run because the
  platform doesn't offer all the required facilities (like large file
  support), even if all the required modules are available.

* ``ResourceDenied`` - this is raised when a test requires a resource that
  is not available.  Primarily used by ``requires``.

* ``verbose`` - you can use this variable to control print output.  Many
  modules use it.  Search for "verbose" in the test_*.py files to see
  lots of examples.

* ``forget(module_name)`` - attempts to cause Python to "forget" that it
  loaded a module and erase any PYC files.

* ``is_resource_enabled(resource)`` - returns a boolean based on whether
  the resource is enabled or not.

* ``requires(resource [, msg])`` - if the required resource is not
  available, the ResourceDenied exception is raised.

* ``verify(condition, reason='test failed')`` - use this instead of::

    assert condition[, reason]

  ``verify()`` has two advantages over ``assert``: it works even in -O
  mode, and it raises ``TestFailed`` on failure instead of
  ``AssertionError``.

* ``is_jython`` - true if the interpreter is Jython, false otherwise.

* ``TESTFN`` - a string that should always be used as the filename when
  you need to create a temp file.  Also use ``try``/``finally`` to ensure
  that your temp files are deleted before your test completes.  Note that
  you cannot unlink an open file on all operating systems, so also be
  sure to close temp files before trying to unlink them.

* ``sortdict(dict)`` - acts like ``repr(dict.items())``, but sorts the
  items first.  This is important when printing a dict value, because the
  order of items produced by ``dict.items()`` is not defined by the
  language.

* ``findfile(file)`` - you can call this function to locate a file
  somewhere along sys.path or in the Lib/test tree - see
  test_ossaudiodev.py for an example of its use.

* ``fcmp(x, y)`` - you can call this function to compare two floating
  point numbers when you expect them to only be approximately equal
  within a fuzz factor (``test_support.FUZZ``, which defaults to 1e-6).

* ``check_syntax_error(testcase, statement)`` - make sure that the
  statement is *not* correct Python syntax.
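A short sketch tying a few of these helpers together (the file contents
and messages are invented for illustration)::

    import os
    from test import test_support

    # TESTFN is the canonical scratch filename; the try/finally guarantees
    # the file is removed even if a check fails, so later tests can reuse
    # the name.
    try:
        f = open(test_support.TESTFN, 'w')
        f.write('spam\n')
        f.close()               # close before unlinking (see TESTFN above)

        f = open(test_support.TESTFN)
        data = f.read()
        f.close()
        test_support.verify(data == 'spam\n', 'unexpected TESTFN contents')
    finally:
        if os.path.exists(test_support.TESTFN):
            os.unlink(test_support.TESTFN)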
Some Non-Obvious regrtest Features
==================================

* Automagic test detection: When you create a new test file test_spam.py,
  you do not need to modify regrtest (or anything else) to advertise its
  existence.  regrtest searches for and runs all modules in the test
  directory with names of the form test_xxx.py.

* Miranda output: If, when running test_spam.py, regrtest does not find
  an expected-output file test/output/test_spam, regrtest pretends that
  it did find one, containing the single line::

      test_spam

  This allows new tests that don't expect to print anything to stdout to
  not bother creating expected-output files.

* Two-stage testing: To run test_spam.py, regrtest imports test_spam as a
  module.  Most tests run to completion as a side-effect of getting
  imported.  After importing test_spam, regrtest also executes
  ``test_spam.test_main()``, if test_spam has a ``test_main`` attribute.
  This is rarely required with the "traditional" Python tests, and you
  shouldn't create a module global with name test_main unless you're
  specifically exploiting this gimmick.  This usage does prove useful
  with unittest-based tests as well, however; defining a ``test_main()``
  which is run by regrtest and a script-stub in the test module
  ("``if __name__ == '__main__': test_main()``") allows the test to be
  used like any other Python test and also work with the
  unittest.py-as-a-script approach, allowing a developer to run specific
  tests from the command line.