diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index 203b0091bdd..7589ab805a7 100644
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -63,7 +63,7 @@ locale_encoding = locale_encoding.lower()
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!
 
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def coding_spec(data):
     """Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@ def coding_spec(data):
         lines = data
     # consider only the first two lines
     if '\n' in lines:
-        lst = lines.split('\n')[:2]
+        lst = lines.split('\n', 2)[:2]
     elif '\r' in lines:
-        lst = lines.split('\r')[:2]
+        lst = lines.split('\r', 2)[:2]
+    else:
+        lst = [lines]
+    for line in lst:
+        match = coding_re.match(line)
+        if match is not None:
+            break
     else:
-        lst = list(lines)
-    str = '\n'.join(lst)
-    match = coding_re.search(str)
-    if not match:
         return None
     name = match.group(1)
     try:
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 31e29698e62..83656fc19f4 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -236,7 +236,7 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@ def detect_encoding(readline):
             line_string = line.decode('ascii')
         except UnicodeDecodeError:
             return None
-
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
diff --git a/Lib/lib2to3/tests/data/false_encoding.py b/Lib/lib2to3/tests/data/false_encoding.py
new file mode 100644
index 00000000000..f4e59e787da
--- /dev/null
+++ b/Lib/lib2to3/tests/data/false_encoding.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
index 8bdebc1f3da..5ecd9b1cb3e 100644
--- a/Lib/lib2to3/tests/test_refactor.py
+++ b/Lib/lib2to3/tests/test_refactor.py
@@ -271,6 +271,10 @@ from __future__ import print_function"""
         fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
         self.check_file_refactoring(fn)
 
+    def test_false_file_encoding(self):
+        fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+        data = self.check_file_refactoring(fn)
+
     def test_bom(self):
         fn = os.path.join(TEST_DATA_DIR, "bom.py")
         data = self.check_file_refactoring(fn)
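The change common to every file in this patch: the old cookie pattern was applied with search() and could fire anywhere in a line, including inside a string literal, while the new pattern is applied with match() and is anchored so that only a genuine PEP 263 comment line qualifies. A minimal sketch contrasting the two (old_re and new_re are illustrative names, not part of the patch):

    import re

    # Old behavior: unanchored search anywhere in the line.
    old_re = re.compile(r"coding[:=]\s*([-\w.]+)")
    # New behavior: anchored match; optional whitespace, then a comment.
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    line = "print('#coding=fake')"
    print(old_re.search(line).group(1))    # 'fake' -- a false positive
    print(new_re.match(line))              # None -- not a comment line

    comment = "# -*- coding: koi8-r -*-"
    print(new_re.match(comment).group(1))  # 'koi8-r' -- real declaration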
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index 0ca51954390..ba02b442743 100644
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -10,7 +10,7 @@ import unicodedata
 import unittest
 
 
-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 
 class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@ class EncodingTest(unittest.TestCase):
 
     def create_source(self, encoding):
         encoding_line = "# coding={0}".format(encoding)
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source_lines = [encoding_line.encode('utf-8')]
         source_lines.append(self.source_line.encode(encoding))
         return b'\n'.join(source_lines)
@@ -50,7 +50,7 @@ class EncodingTest(unittest.TestCase):
         # Make sure that an encoding that has never been a standard one for
         # Python works.
         encoding_line = "# coding=koi8-r"
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
         self.run_test(source)
 
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index b4a58f0db26..17650855eb3 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -946,6 +946,13 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_false_encoding(self):
+        # Issue 18873: "Encoding" detected in non-comment lines
+        readline = self.get_readline((b'print("#coding=fake")',))
+        encoding, consumed_lines = detect_encoding(readline)
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
     def test_open(self):
         filename = support.TESTFN + '.py'
         self.addCleanup(support.unlink, filename)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 2fbde0fa9b4..91f8e4785b9 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -31,7 +31,7 @@ from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 import token
 
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@ def detect_encoding(readline):
                 msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)
 
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
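A usage sketch of the patched tokenize.detect_encoding(), mirroring the new test_false_encoding case above; the byte strings are the test's own data, the surrounding boilerplate is assumed:

    import io
    import tokenize

    # A cookie-like string inside code no longer counts as a declaration.
    source = b'print("#coding=fake")\n'
    encoding, lines = tokenize.detect_encoding(io.BytesIO(source).readline)
    print(encoding)  # 'utf-8' (the default)
    print(lines)     # [b'print("#coding=fake")\n']

    # A real PEP 263 comment is still honored, and its name is normalized.
    real = b'# -*- coding: latin-1 -*-\nx = 1\n'
    encoding, lines = tokenize.detect_encoding(io.BytesIO(real).readline)
    print(encoding)  # 'iso-8859-1'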
diff --git a/Misc/NEWS b/Misc/NEWS
index 7e2a65da1ea..2f513078669 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@ Core and Builtins
 Library
 -------
 
+- Issue #18873: The tokenize module now detects Python source code encoding
+  only in comment lines.
+
 - Issue #17764: Enable http.server to bind to a user specified network
   interface.  Patch contributed by Malte Swart.
 
@@ -47,6 +50,9 @@ Tests
 IDLE
 ----
 
+- Issue #18873: IDLE now detects Python source code encoding only in comment
+  lines.
+
 - Issue #18988: The "Tab" key now works when a word is already autocompleted.
 
 Documentation
@@ -55,6 +61,12 @@ Documentation
 - Issue #17003: Unified the size argument names in the io module with common
   practice.
 
+Tools/Demos
+-----------
+
+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+  code encoding only in comment lines.
+
 What's New in Python 3.4.0 Alpha 2?
 ===================================
 
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index b3e9dc73610..c0997d6598e 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -32,13 +32,13 @@ except ImportError:
           "no sophisticated Python source file search will be done.",
           file=sys.stderr)
 
-decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
 
 def get_declaration(line):
-    match = decl_re.search(line)
+    match = decl_re.match(line)
     if match:
         return match.group(1)
-    return ''
+    return b''
 
 def has_correct_encoding(text, codec):
     try:
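In findnocoding.py the pattern is a bytes regex (rb'...'), so the no-match case now returns b'' rather than '' to keep the return type consistent. A self-contained re-creation of the patched helper, for illustration only:

    import re

    decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

    def get_declaration(line):
        """Return the declared encoding (as bytes), or b'' if none."""
        match = decl_re.match(line)
        if match:
            return match.group(1)
        return b''

    print(get_declaration(b'# coding: utf-8\n'))     # b'utf-8'
    print(get_declaration(b"print('#coding=0')\n"))  # b'' -- plain code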