diff --git a/Lib/html/parser.py b/Lib/html/parser.py
index a65478058f4..9db8ab582be 100644
--- a/Lib/html/parser.py
+++ b/Lib/html/parser.py
@@ -187,17 +187,10 @@ class HTMLParser(_markupbase.ParserBase):
             elif startswith("<?", i):
                 k = self.parse_pi(i)
             elif startswith("<!", i):
-                # Declarations can be, e.g., <!DOCTYPE html> or
-                # <![if !IE]>. When strict is True an
-                # error is raised, when it's False they will be considered
-                # as bogus comments and parsed (see parse_bogus_comment).
if self.strict:
k = self.parse_declaration(i)
else:
- try:
- k = self.parse_declaration(i)
- except HTMLParseError:
- k = self.parse_bogus_comment(i)
+ k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
@@ -269,6 +262,27 @@ class HTMLParser(_markupbase.ParserBase):
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
+ # Internal -- parse html declarations, return length or -1 if not terminated
+ # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
+ # See also parse_declaration in _markupbase
+ def parse_html_declaration(self, i):
+ rawdata = self.rawdata
+        if rawdata[i:i+2] != '<!':
+            self.error('unexpected call to parse_html_declaration()')
+        if rawdata[i:i+4] == '<!--':
+            return self.parse_comment(i)
+        elif rawdata[i:i+3] == '<![':
+            return self.parse_marked_section(i)
+        elif rawdata[i:i+9].lower() == '<!doctype':
+            # find the closing >
+            gtpos = rawdata.find('>', 9)
+ if gtpos == -1:
+ return -1
+ self.handle_decl(rawdata[i+2:gtpos])
+ return gtpos+1
+ else:
+ return self.parse_bogus_comment(i)
+
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 2e7277e573d..1da2ce4f9b7 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -93,7 +93,7 @@ class TestCaseBase(unittest.TestCase):
def _parse_error(self, source):
def parse(source=source):
- parser = html.parser.HTMLParser()
+ parser = self.get_collector()
parser.feed(source)
parser.close()
self.assertRaises(html.parser.HTMLParseError, parse)
@@ -122,7 +122,7 @@ comment1b-->
 <Img sRc='Bar' isMAP>sample
 text
 &#x201C;
-
+