# Very simple test - Parse a file and print what happens

# XXX TypeErrors on calling handlers, or on bad return values from a
# handler, are obscure and unhelpful.

import pyexpat
from xml.parsers import expat

from test.test_support import sortdict, TestFailed

class Outputter:
    def StartElementHandler(self, name, attrs):
        print 'Start element:\n\t', repr(name), sortdict(attrs)

    def EndElementHandler(self, name):
        print 'End element:\n\t', repr(name)

    def CharacterDataHandler(self, data):
        data = data.strip()
        if data:
            print 'Character data:'
            print '\t', repr(data)

    def ProcessingInstructionHandler(self, target, data):
        print 'PI:\n\t', repr(target), repr(data)

    def StartNamespaceDeclHandler(self, prefix, uri):
        print 'NS decl:\n\t', repr(prefix), repr(uri)

    def EndNamespaceDeclHandler(self, prefix):
        print 'End of NS decl:\n\t', repr(prefix)

    def StartCdataSectionHandler(self):
        print 'Start of CDATA section'

    def EndCdataSectionHandler(self):
        print 'End of CDATA section'

    def CommentHandler(self, text):
        print 'Comment:\n\t', repr(text)

    def NotationDeclHandler(self, *args):
        name, base, sysid, pubid = args
        print 'Notation declared:', args

    def UnparsedEntityDeclHandler(self, *args):
        entityName, base, systemId, publicId, notationName = args
        print 'Unparsed entity decl:\n\t', args

    def NotStandaloneHandler(self, userData):
        print 'Not standalone'
        return 1

    def ExternalEntityRefHandler(self, *args):
        context, base, sysId, pubId = args
        print 'External entity ref:', args[1:]
        return 1
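    # Per the expat documentation, ExternalEntityRefHandler must return a
    # true value to tell Expat the reference was handled; a false return
    # makes the parse fail with an external-entity-handling error.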

    def DefaultHandler(self, userData):
        pass

    def DefaultHandlerExpand(self, userData):
        pass
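    # Per the expat documentation, the two do-nothing handlers above are not
    # interchangeable: setting DefaultHandler also suppresses expansion of
    # internal entity references, while DefaultHandlerExpand leaves
    # expansion enabled.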

def confirm(ok):
    if ok:
        print "OK."
    else:
        print "Not OK."

out = Outputter()
parser = expat.ParserCreate(namespace_separator='!')

# Test getting/setting returns_unicode
parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)
parser.returns_unicode = 1; confirm(parser.returns_unicode == 1)
parser.returns_unicode = 2; confirm(parser.returns_unicode == 1)
parser.returns_unicode = 0; confirm(parser.returns_unicode == 0)

# Test getting/setting ordered_attributes
parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)
parser.ordered_attributes = 1; confirm(parser.ordered_attributes == 1)
parser.ordered_attributes = 2; confirm(parser.ordered_attributes == 1)
parser.ordered_attributes = 0; confirm(parser.ordered_attributes == 0)

# Test getting/setting specified_attributes
parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
parser.specified_attributes = 1; confirm(parser.specified_attributes == 1)
parser.specified_attributes = 2; confirm(parser.specified_attributes == 1)
parser.specified_attributes = 0; confirm(parser.specified_attributes == 0)
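# (The three attributes above behave as boolean flags: assigning 2 is
# expected to read back as 1.)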

HANDLER_NAMES = [
    'StartElementHandler', 'EndElementHandler',
    'CharacterDataHandler', 'ProcessingInstructionHandler',
    'UnparsedEntityDeclHandler', 'NotationDeclHandler',
    'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
    'CommentHandler', 'StartCdataSectionHandler',
    'EndCdataSectionHandler',
    'DefaultHandler', 'DefaultHandlerExpand',
    #'NotStandaloneHandler',
    'ExternalEntityRefHandler'
    ]
for name in HANDLER_NAMES:
    setattr(parser, name, getattr(out, name))

data = '''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "&#226;">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>

<root attr1="value1" attr2="value2&#8000;">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
     Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
</root>
'''

# Produce UTF-8 output
parser.returns_unicode = 0
try:
    parser.Parse(data, 1)
except expat.error:
    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
    print '** Line', parser.ErrorLineNumber
    print '** Column', parser.ErrorColumnNumber
    print '** Byte', parser.ErrorByteIndex
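# (With returns_unicode set to 0 the handlers receive 8-bit strings of
# UTF-8-encoded data, hence the comment above; with it set to 1 they
# receive Unicode strings, as in the next run.)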

# Try the parse again, this time producing Unicode output
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1

for name in HANDLER_NAMES:
    setattr(parser, name, getattr(out, name))
try:
    parser.Parse(data, 1)
except expat.error:
    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
    print '** Line', parser.ErrorLineNumber
    print '** Column', parser.ErrorColumnNumber
    print '** Byte', parser.ErrorByteIndex

# Try parsing a file
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1

for name in HANDLER_NAMES:
    setattr(parser, name, getattr(out, name))
import StringIO
file = StringIO.StringIO(data)
try:
    parser.ParseFile(file)
except expat.error:
    print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
    print '** Line', parser.ErrorLineNumber
    print '** Column', parser.ErrorColumnNumber
    print '** Byte', parser.ErrorByteIndex
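# (ParseFile() works with any object that provides a read(nbytes) method,
# which is why a StringIO wrapper around the same document is enough here.)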

# Tests that make sure we get errors when the namespace_separator value
# is illegal, and that we don't for good values:
print
print "Testing constructor for proper handling of namespace_separator values:"
expat.ParserCreate()
expat.ParserCreate(namespace_separator=None)
expat.ParserCreate(namespace_separator=' ')
print "Legal values tested o.k."
try:
    expat.ParserCreate(namespace_separator=42)
except TypeError, e:
    print "Caught expected TypeError:"
    print e
else:
    print "Failed to catch expected TypeError."

try:
    expat.ParserCreate(namespace_separator='too long')
except ValueError, e:
    print "Caught expected ValueError:"
    print e
else:
    print "Failed to catch expected ValueError."

# ParserCreate() needs to accept a namespace_separator of zero length
# to satisfy the requirements of RDF applications that are required
# to simply glue together the namespace URI and the localname. Though
# considered a wart of the RDF specifications, it needs to be supported.
#
# See XML-SIG mailing list thread starting with
# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
#
expat.ParserCreate(namespace_separator='') # too short
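
# A small illustrative check (an addition, not part of the original test):
# with a zero-length namespace_separator the element name reported to the
# handler should be the namespace URI glued directly onto the local name.
# The URI below is made up for the example, and the check is silent so the
# test's printed output is unchanged.
_glue_names = []
_glue_parser = expat.ParserCreate(namespace_separator='')
_glue_parser.StartElementHandler = lambda name, attrs: _glue_names.append(name)
_glue_parser.Parse('<x:e xmlns:x="http://example.invalid/ns"/>', 1)
assert _glue_names == ['http://example.invalid/nse'], _glue_names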

# Test the interning machinery.
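# (Identical tag names should come back as the very same string object, so
# the identity check below can use "is".)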
p = expat.ParserCreate()
L = []
def collector(name, *args):
    L.append(name)
p.StartElementHandler = collector
p.EndElementHandler = collector
p.Parse("<e> <e/> <e></e> </e>", 1)
tag = L[0]
if len(L) != 6:
    print "L should only contain 6 entries; found", len(L)
for entry in L:
    if tag is not entry:
        print "expected L to contain many references to the same string",
        print "(it didn't)"
        print "L =", repr(L)
        break

# Tests of the buffer_text attribute.
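# (When buffer_text is true, contiguous runs of character data are
# accumulated and delivered in a single CharacterDataHandler call rather
# than in whatever chunks Expat happens to produce; the checks below rely
# on that collapsing.)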
import sys

class TextCollector:
    def __init__(self, parser):
        self.stuff = []

    def check(self, expected, label):
        require(self.stuff == expected,
                "%s\nstuff = %r\nexpected = %r"
                % (label, self.stuff, map(unicode, expected)))

    def CharacterDataHandler(self, text):
        self.stuff.append(text)

    def StartElementHandler(self, name, attrs):
        self.stuff.append("<%s>" % name)
        bt = attrs.get("buffer-text")
        if bt == "yes":
            parser.buffer_text = 1
        elif bt == "no":
            parser.buffer_text = 0

    def EndElementHandler(self, name):
        self.stuff.append("</%s>" % name)

    def CommentHandler(self, data):
        self.stuff.append("<!--%s-->" % data)

def require(cond, label):
    # similar to confirm(), but no extraneous output
    if not cond:
        raise TestFailed(label)

def setup(handlers=[]):
    parser = expat.ParserCreate()
    require(not parser.buffer_text,
            "buffer_text not disabled by default")
    parser.buffer_text = 1
    handler = TextCollector(parser)
    parser.CharacterDataHandler = handler.CharacterDataHandler
    for name in handlers:
        setattr(parser, name, getattr(handler, name))
    return parser, handler

parser, handler = setup()
require(parser.buffer_text,
        "text buffering either not acknowledged or not enabled")
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["123"],
              "buffered text not properly collapsed")

# XXX This test exposes more detail of Expat's text chunking than we
# XXX like, but it tests what we need to concisely.
parser, handler = setup(["StartElementHandler"])
parser.Parse("<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1)
handler.check(["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
              "buffering control not reacting as expected")

parser, handler = setup()
parser.Parse("<a>1<b/>&lt;2&gt;<c/>&#32;\n&#x20;3</a>", 1)
handler.check(["1<2> \n 3"],
              "buffered text not properly collapsed")

parser, handler = setup(["StartElementHandler"])
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["<a>", "1", "<b>", "2", "<c>", "3"],
              "buffered text not properly split")

parser, handler = setup(["StartElementHandler", "EndElementHandler"])
parser.CharacterDataHandler = None
parser.Parse("<a>1<b/>2<c/>3</a>", 1)
handler.check(["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"],
              "huh?")

parser, handler = setup(["StartElementHandler", "EndElementHandler"])
parser.Parse("<a>1<b></b>2<c/>3</a>", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"],
              "huh?")

parser, handler = setup(["CommentHandler", "EndElementHandler",
                         "StartElementHandler"])
parser.Parse("<a>1<b/>2<c></c>345</a> ", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
              "buffered text not properly split")

parser, handler = setup(["CommentHandler", "EndElementHandler",
                         "StartElementHandler"])
parser.Parse("<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1)
handler.check(["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
               "<!--abc-->", "4", "<!--def-->", "5", "</a>"],
              "buffered text not properly split")