in tokenize.detect_encoding(), return utf-8-sig when a BOM is found
parent 8c8042734a
commit 689a558098
@@ -95,7 +95,8 @@ function it uses to do this is available:
    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in :pep:`263`. If both a BOM and a cookie are present,
-   but disagree, a SyntaxError will be raised.
+   but disagree, a SyntaxError will be raised. Note that if the BOM is found,
+   ``'utf-8-sig'`` will be returned as an encoding.
 
    If no encoding is specified, then the default of ``'utf-8'`` will be returned.
 
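For illustration only (not part of this commit), the documented behaviour can be exercised like this; io.BytesIO is used here purely as a convenient way to get a readline callable over the source bytes:

    import io
    from tokenize import detect_encoding

    # Source that starts with the UTF-8 BOM (b'\xef\xbb\xbf').
    source = b'\xef\xbb\xbfprint("hello")\n'

    encoding, consumed_lines = detect_encoding(io.BytesIO(source).readline)
    print(encoding)        # 'utf-8-sig' now that a BOM was detected
    print(consumed_lines)  # [b'print("hello")\n'] -- the BOM itself is stripped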
@@ -726,7 +726,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
             )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'# something\n', b'print(something)\n'])
 
@@ -747,7 +747,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
             )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'# coding=utf-8\n'])
 
     def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
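The case this test pins down (a BOM followed by a matching coding cookie) can be reproduced outside the suite roughly as follows; io.BytesIO stands in for the suite's get_readline helper:

    import io
    from tokenize import detect_encoding

    lines = b'\xef\xbb\xbf# coding=utf-8\nprint(something)\n'
    encoding, consumed_lines = detect_encoding(io.BytesIO(lines).readline)
    print(encoding)        # 'utf-8-sig', the BOM takes precedence over the plain cookie name
    print(consumed_lines)  # [b'# coding=utf-8\n'], with the BOM already stripped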
@@ -779,7 +779,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
             )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'#! something\n', b'f# coding=utf-8\n'])
 
@@ -833,12 +833,12 @@ class TestDetectEncoding(TestCase):
 
         readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'print(something)\n'])
 
         readline = self.get_readline((b'\xef\xbb\xbf',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [])
 
         readline = self.get_readline((b'# coding: bad\n',))
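The BOM-only case from this hunk, sketched with io.BytesIO instead of get_readline:

    import io
    from tokenize import detect_encoding

    # A file that contains nothing but the UTF-8 BOM.
    encoding, consumed_lines = detect_encoding(io.BytesIO(b'\xef\xbb\xbf').readline)
    print(encoding)        # 'utf-8-sig'
    print(consumed_lines)  # [] -- no lines beyond the BOM were consumed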
@@ -301,14 +301,16 @@ def detect_encoding(readline):
     in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
 
     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
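As a side note on the docstring's mismatch rule (a sketch, not from this commit, again wrapping the bytes in io.BytesIO): a BOM combined with a non-utf-8 cookie is rejected, while the new default bookkeeping only affects the no-cookie paths:

    import io
    from tokenize import detect_encoding

    # BOM says utf-8, cookie says latin-1: per the docstring this must raise.
    conflicting = b'\xef\xbb\xbf# coding: latin-1\nprint(x)\n'
    try:
        detect_encoding(io.BytesIO(conflicting).readline)
    except SyntaxError as exc:
        print('mismatch rejected:', exc)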
@@ -340,8 +342,9 @@ def detect_encoding(readline):
     if first.startswith(BOM_UTF8):
         bom_found = True
         first = first[3:]
+        default = 'utf-8-sig'
     if not first:
-        return 'utf-8', []
+        return default, []
 
     encoding = find_cookie(first)
     if encoding:
@@ -349,13 +352,13 @@ def detect_encoding(readline):
 
     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]
 
     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]
 
-    return 'utf-8', [first, second]
+    return default, [first, second]
 
 
 def tokenize(readline):
@@ -394,6 +397,9 @@ def _tokenize(readline, encoding):
     indents = [0]
 
     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:                                # loop over lines in stream
         try:
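A sketch of what a consumer of tokenize() sees after this normalisation (again using io.BytesIO for the readline argument); the token stream beyond the first token is version-dependent, but the ENCODING token itself should read 'utf-8' rather than 'utf-8-sig', since the BOM was already stripped by detect_encoding():

    import io
    import tokenize

    source = b'\xef\xbb\xbfx = 1\n'
    first = next(tokenize.tokenize(io.BytesIO(source).readline))

    print(first.type == tokenize.ENCODING)  # True
    print(first.string)                     # 'utf-8', not 'utf-8-sig'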