diff --git a/Lib/html/parser.py b/Lib/html/parser.py
index a65478058f4..9db8ab582be 100644
--- a/Lib/html/parser.py
+++ b/Lib/html/parser.py
@@ -187,17 +187,10 @@ class HTMLParser(_markupbase.ParserBase):
                 elif startswith("<?", i):
                     k = self.parse_pi(i)
                 elif startswith("<!", i):
-                    # This could be a comment of the form <!--comment--> or
-                    # <!DOCTYPE html>. When strict is True an
-                    # error is raised, when it's False they will be considered
-                    # as bogus comments and parsed (see parse_bogus_comment).
                     if self.strict:
                         k = self.parse_declaration(i)
                     else:
-                        try:
-                            k = self.parse_declaration(i)
-                        except HTMLParseError:
-                            k = self.parse_bogus_comment(i)
+                        k = self.parse_html_declaration(i)
                 elif (i + 1) < n:
                     self.handle_data("<")
                     k = i + 1
@@ -269,6 +262,27 @@ class HTMLParser(_markupbase.ParserBase):
             i = self.updatepos(i, n)
         self.rawdata = rawdata[i:]
 
+    # Internal -- parse html declarations, return length or -1 if not terminated
+    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
+    # See also parse_declaration in _markupbase
+    def parse_html_declaration(self, i):
+        rawdata = self.rawdata
+        if rawdata[i:i+2] != '<!':
+            self.error('unexpected call to parse_html_declaration()')
+        if rawdata[i:i+4] == '<!--':
+            return self.parse_comment(i)
+        elif rawdata[i:i+3] == '<![':
+            return self.parse_marked_section(i)
+        elif rawdata[i:i+9].lower() == '<!doctype':
+            # find the closing >
+            gtpos = rawdata.find('>', 9)
+            if gtpos == -1:
+                return -1
+            self.handle_decl(rawdata[i+2:gtpos])
+            return gtpos+1
+        else:
+            return self.parse_bogus_comment(i)
+
     # Internal -- parse bogus comment, return length or -1 if not terminated
     # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
     def parse_bogus_comment(self, i, report=1):
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 2e7277e573d..1da2ce4f9b7 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -93,7 +93,7 @@ class TestCaseBase(unittest.TestCase):
 
     def _parse_error(self, source):
         def parse(source=source):
-            parser = html.parser.HTMLParser()
+            parser = self.get_collector()
             parser.feed(source)
             parser.close()
         self.assertRaises(html.parser.HTMLParseError, parse)
@@ -122,7 +122,7 @@ comment1b-->
 <Img sRc='Bar' isMAP>sample
 text
 &#x201C;
-
+
 </Html>
 """, [
     ("data", "\n"),
@@ -157,24 +157,6 @@ text
             ("data", " foo"),
             ])
 
-    def test_doctype_decl(self):
-        inside = """\
-DOCTYPE html [
-  <!ELEMENT html - O EMPTY>
-  <!ATTLIST html
-      version CDATA #IMPLIED
-      profile CDATA 'DublinCore'>
-  <!NOTATION datatype SYSTEM 'http://xml.example.com/notations/strict'>
-  <!ENTITY myEntity 'internal parsed entity'>
-  <!ENTITY anEntity SYSTEM 'http://example.com/entity.ent'>
-  <!ENTITY % paramEntity 'name|name|name'>
-  %paramEntity;
-  <!-- comment -->
-]"""
-        self._run_check("<!%s>" % inside, [
-            ("decl", inside),
-            ])
-
     def test_bad_nesting(self):
         # Strangely, this *is* supposed to test that overlapping
         # elements are allowed.  HTMLParser is more geared toward
@@ -247,6 +229,30 @@ DOCTYPE html [
         self._parse_error("<a foo='>'")
         self._parse_error("<a foo='>")
 
+    def test_valid_doctypes(self):
+        # from http://www.w3.org/QA/2002/04/valid-dtd-list.html
+        dtds = ['HTML',  # HTML5 doctype
+                ('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
+                 '"http://www.w3.org/TR/html4/strict.dtd"'),
+                ('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
+                 '"http://www.w3.org/TR/html4/loose.dtd"'),
+                ('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
+                 '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
+                ('html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
+                 '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'),
+                ('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
+                 '"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
+                ('html PUBLIC "-//W3C//DTD '
+                 'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" '
+                 '"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'),
+                'html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"',
+                'html']
+        for dtd in dtds:
+            self._run_check("<!DOCTYPE %s>" % dtd,
+                            [('decl', 'DOCTYPE ' + dtd)])
+
     def test_declaration_junk_chars(self):
         self._parse_error("<!DOCTYPE foo $ >")
 
@@ -368,6 +374,29 @@ class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
                           ('comment', '/img'),
                           ('endtag', 'html<')])
 
+    def test_starttag_junk_chars(self):
+        self._run_check("</>", [])
+        self._run_check("</$>", [('comment', '$')])
+        self._run_check("</", [('data', '</')])
+        self._run_check("</a", [('data', '</a')])
+        self._run_check("<a<a>", [('starttag', 'a<a', [])])
+        self._run_check("</a<a>", [('endtag', 'a<a')])
+        self._run_check("<!", [('data', '<!')])
+        self._run_check("<a", [('data', '<a')])
+        self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
+        self._run_check("<a foo='bar", [('data', "<a foo='bar")])
+        self._run_check("<a foo='>'", [('data', "<a foo='>'")])
+        self._run_check("<a foo='>", [('data', "<a foo='>")])
+
+    def test_illegal_declarations(self):
+        self._run_check('<!spacer type="block" height="25">',
+                        [('comment', 'spacer type="block" height="25"')])
+
     def test_with_unquoted_attributes(self):
         # see #12008
         html = ("<html><body bgcolor=d0ca90 text='181008'>"
@@ -476,7 +505,7 @@ class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
         self._run_check(html, expected)
 
     def test_unescape_function(self):
-        p = html.parser.HTMLParser()
+        p = self.get_collector()
         self.assertEqual(p.unescape('&#bad;'),'&#bad;')
         self.assertEqual(p.unescape('&#0038;'),'&')
         # see #12888
@@ -486,11 +515,14 @@ class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
         html = ('<! not really a comment >'
                 '<! not a comment either -->'
                 '<! -- close enough -->'
+                '<!><!<-- this was an empty comment>'
                 '<!!! another bogus comment !!!>')
         expected = [
            ('comment', ' not really a comment '),
            ('comment', ' not a comment either --'),
            ('comment', ' -- close enough --'),
+           ('comment', ''),
+           ('comment', '<-- this was an empty comment'),
            ('comment', '!! another bogus comment !!!'),
         ]
         self._run_check(html, expected)
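
For context, a minimal sketch (not part of the patch) of the behaviour the new parse_html_declaration() path gives the tolerant parser: a well-formed "<!DOCTYPE ...>" is reported through handle_decl(), while other junk after "<!" falls back to parse_bogus_comment() and is reported through handle_comment(), matching the expectations added to the test file above. The DemoCollector subclass below is made up for illustration; on Python 3.2-3.4 it would need HTMLParser(strict=False), while on later versions the tolerant behaviour is the only one available.

    # Illustrative sketch only; DemoCollector is a hypothetical helper,
    # not part of this patch or of the stdlib test suite.
    from html.parser import HTMLParser

    class DemoCollector(HTMLParser):
        """Record decl/comment events so the '<!' handling is visible."""
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.events = []

        def handle_decl(self, decl):
            # reached via the '<!doctype' branch of parse_html_declaration()
            self.events.append(('decl', decl))

        def handle_comment(self, data):
            # reached via the parse_bogus_comment() fallback
            self.events.append(('comment', data))

    p = DemoCollector()   # on 3.2-3.4 this would be HTMLParser(strict=False)
    p.feed('<!DOCTYPE html>'
           '<!spacer type="block" height="25">')
    p.close()
    print(p.events)
    # [('decl', 'DOCTYPE html'), ('comment', 'spacer type="block" height="25"')]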