From 689a55809818a846d2733241642572840d20570b Mon Sep 17 00:00:00 2001
From: Benjamin Peterson
Date: Thu, 18 Mar 2010 22:29:52 +0000
Subject: [PATCH] in tokenize.detect_encoding(), return utf-8-sig when a BOM is found

---
 Doc/library/tokenize.rst  |  3 ++-
 Lib/test/test_tokenize.py | 10 +++++-----
 Lib/tokenize.py           | 18 ++++++++++++------
 Misc/NEWS                 |  3 +++
 4 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst
index 7017045f618..ac6ae36bd54 100644
--- a/Doc/library/tokenize.rst
+++ b/Doc/library/tokenize.rst
@@ -95,7 +95,8 @@ function it uses to do this is available:
 
    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in :pep:`263`.  If both a BOM and a cookie are present,
-   but disagree, a SyntaxError will be raised.
+   but disagree, a SyntaxError will be raised.  Note that if the BOM is found,
+   ``'utf-8-sig'`` will be returned as an encoding.
 
    If no encoding is specified, then the default of ``'utf-8'`` will be
    returned.
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7b91ab2dbd3..1bfac4048ed 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -726,7 +726,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines,
                           [b'# something\n', b'print(something)\n'])
 
@@ -747,7 +747,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'# coding=utf-8\n'])
 
     def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
@@ -779,7 +779,7 @@ class TestDetectEncoding(TestCase):
             b'do_something(else)\n'
         )
         encoding, consumed_lines = detect_encoding(self.get_readline(lines))
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'#! something\n',
                                            b'f# coding=utf-8\n'])
 
@@ -833,12 +833,12 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [b'print(something)\n'])
 
         readline = self.get_readline((b'\xef\xbb\xbf',))
         encoding, consumed_lines = detect_encoding(readline)
-        self.assertEquals(encoding, 'utf-8')
+        self.assertEquals(encoding, 'utf-8-sig')
         self.assertEquals(consumed_lines, [])
 
         readline = self.get_readline((b'# coding: bad\n',))
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index f82922b7f08..89721371b99 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -301,14 +301,16 @@ def detect_encoding(readline):
     in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError.
+    cookie as specified in pep-0263. If both a bom and a cookie are present, but
+    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
+    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
 
     If no encoding is specified, then the default of 'utf-8' will be returned.
     """
     bom_found = False
     encoding = None
+    default = 'utf-8'
     def read_or_stop():
         try:
             return readline()
@@ -340,8 +342,9 @@ def detect_encoding(readline):
     if first.startswith(BOM_UTF8):
         bom_found = True
         first = first[3:]
+        default = 'utf-8-sig'
     if not first:
-        return 'utf-8', []
+        return default, []
 
     encoding = find_cookie(first)
     if encoding:
@@ -349,13 +352,13 @@ def detect_encoding(readline):
 
     second = read_or_stop()
     if not second:
-        return 'utf-8', [first]
+        return default, [first]
 
     encoding = find_cookie(second)
     if encoding:
         return encoding, [first, second]
 
-    return 'utf-8', [first, second]
+    return default, [first, second]
 
 
 def tokenize(readline):
@@ -394,6 +397,9 @@ def _tokenize(readline, encoding):
     indents = [0]
 
     if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+            encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
     while True:             # loop over lines in stream
         try:
diff --git a/Misc/NEWS b/Misc/NEWS
index 01c37ce3256..f1b068b47dc 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -283,6 +283,9 @@ C-API
 Library
 -------
 
+- ``tokenize.detect_encoding`` now returns ``'utf-8-sig'`` when a UTF-8 BOM is
+  detected.
+
 - Issue #8024: Update the Unicode database to 5.2.
 
 - Issue #6716/2: Backslash-replace error output in compilall.