Fiddled things so that test_normalization is expected to be skipped
if and only if the test input file doesn't exist.
This commit is contained in:
Tim Peters 2002-11-24 18:53:11 +00:00
parent 65730a4de8
commit 1b445d3fcf
2 changed files with 60 additions and 39 deletions

View File

@ -503,6 +503,10 @@ def printlist(x, width=70, indent=4):
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_normalization
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_normalization.skip_expected
# controls that
_expectations = {
'win32':
@ -528,7 +532,6 @@ _expectations = {
test_mhlib
test_mpz
test_nis
test_normalization
test_openpty
test_poll
test_pty
@ -829,12 +832,19 @@ _expectations = {
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_normalization
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = Set(s.split())
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_normalization.skip_expected:
self.expected.add('test_normalization')
self.valid = True
def isvalid(self):

View File

@ -1,11 +1,10 @@
from test.test_support import verbose, TestFailed, TestSkipped, verify
import sys
import os
from unicodedata import normalize
try:
data = open("NormalizationTest.txt", "r").readlines()
except IOError:
raise TestSkipped("NormalizationTest.txt not found, download from "
"http://www.unicode.org/Public/UNIDATA/NormalizationTest.txt")
TESTDATAFILE = "NormalizationTest.txt"
skip_expected = not os.path.exists(TESTDATAFILE)
class RangeError:
pass
@ -29,8 +28,15 @@ def unistr(data):
raise RangeError
return u"".join([unichr(x) for x in data])
part1_data = {}
for line in data:
def test_main():
if skip_expected:
raise TestSkipped(TESTDATAFILE + " not found, download from " +
"http://www.unicode.org/Public/UNIDATA/" + TESTDATAFILE)
data = open(TESTDATAFILE).readlines()
part1_data = {}
for line in data:
if '#' in line:
line = line.split('#')[0]
line = line.strip()
@ -53,16 +59,21 @@ for line in data:
verify(c4 == NFC(c4) == NFC(c5), line)
verify(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
verify(c5 == NFD(c4) == NFD(c5), line)
verify(c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5), line)
verify(c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5), line)
verify(c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
verify(c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = unichr(c)
if X in part1_data:
continue
assert X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c
if __name__ == "__main__":
test_main()