mirror of https://github.com/python/cpython

Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.

commit dafea85190 (parent 975fce3788)
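At the core of the patch is one regular expression, repeated across tokenize, IDLE, 2to3, and findnocoding.py: the old pattern matched a coding cookie anywhere in a line, while the new one requires the line to be a comment, as PEP 263 specifies. A minimal sketch of the difference, using the exact old and new patterns from the hunks below:

    import re

    # Old, unanchored pattern (pre-patch tokenize.py): matches anywhere in the line.
    old_re = re.compile(r"coding[:=]\s*([-\w.]+)")
    # New pattern introduced by this commit: the line must be a comment.
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    line = "print '#coding=0'"            # ordinary code, not an encoding declaration
    print(old_re.search(line).group(1))   # '0'  -- false positive
    print(new_re.match(line))             # None -- correctly rejected

    cookie = "# -*- coding: koi8-r -*-"
    print(new_re.match(cookie).group(1))  # 'koi8-r' -- real declarations still match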
Lib/idlelib/IOBinding.py

@@ -63,7 +63,7 @@ locale_encoding = locale_encoding.lower()
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!

-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

 def coding_spec(data):
     """Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@ def coding_spec(data):
         lines = data
     # consider only the first two lines
     if '\n' in lines:
-        lst = lines.split('\n')[:2]
+        lst = lines.split('\n', 2)[:2]
     elif '\r' in lines:
-        lst = lines.split('\r')[:2]
+        lst = lines.split('\r', 2)[:2]
+    else:
+        lst = [lines]
+    for line in lst:
+        match = coding_re.match(line)
+        if match is not None:
+            break
     else:
-        lst = list(lines)
-    str = '\n'.join(lst)
-    match = coding_re.search(str)
-    if not match:
         return None
     name = match.group(1)
     try:
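A condensed sketch of the new control flow in coding_spec: each of the first two lines is tested on its own with the anchored pattern, and the loop's else clause falls through to None when no line declares an encoding. This simplification assumes the caller passes decoded text; the real IDLE function also handles bytes input and validates the codec name.

    import re

    coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    def coding_spec(data):
        # Check each of the first two lines separately instead of
        # joining them and searching the combined string.
        for line in data.split('\n', 2)[:2]:
            match = coding_re.match(line)
            if match is not None:
                return match.group(1)
        return None

    print(coding_spec("# -*- coding: latin-1 -*-\nx = 1\n"))  # 'latin-1'
    print(coding_spec("print '#coding=0'\nx = 1\n"))          # None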
Lib/lib2to3/pgen2/tokenize.py

@@ -236,7 +236,7 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)

-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@ def detect_encoding(readline):
             line_string = line.decode('ascii')
         except UnicodeDecodeError:
             return None
-
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
Lib/lib2to3/tests/data/false_encoding.py (new file)

@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
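This fixture is deliberately Python 2 source whose only "coding" text sits inside a string literal. Before the fix, lib2to3's detect_encoding picked up 'coding=0' and then failed looking up codec '0'; now the file falls back to the UTF-8 default. A quick check, assuming lib2to3 is importable (it shipped with the stdlib at the time of this commit):

    import io
    from lib2to3.pgen2.tokenize import detect_encoding

    src = b"#!/usr/bin/env python\nprint '#coding=0'\n"
    encoding, consumed = detect_encoding(io.BytesIO(src).readline)
    print(encoding)   # 'utf-8' -- the string literal is no longer read as a cookie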
Lib/lib2to3/tests/test_refactor.py

@@ -271,6 +271,10 @@ from __future__ import print_function"""
         fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
         self.check_file_refactoring(fn)

+    def test_false_file_encoding(self):
+        fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+        data = self.check_file_refactoring(fn)
+
     def test_bom(self):
         fn = os.path.join(TEST_DATA_DIR, "bom.py")
         data = self.check_file_refactoring(fn)
Lib/test/test_importlib/source/test_source_encoding.py

@@ -10,7 +10,7 @@ import unicodedata
 import unittest


-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)


 class EncodingTest(unittest.TestCase):

@@ -41,7 +41,7 @@ class EncodingTest(unittest.TestCase):
     def create_source(self, encoding):
         encoding_line = "# coding={0}".format(encoding)
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source_lines = [encoding_line.encode('utf-8')]
         source_lines.append(self.source_line.encode(encoding))
         return b'\n'.join(source_lines)

@@ -50,7 +50,7 @@ class EncodingTest(unittest.TestCase):
         # Make sure that an encoding that has never been a standard one for
         # Python works.
         encoding_line = "# coding=koi8-r"
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
         self.run_test(source)
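A side note on the search-to-match switch in these asserts: because the new pattern is anchored with '^' and compiled without re.MULTILINE, search and match agree on these lines, so the switch mainly makes the tests exercise the same call the detection code now uses. A minimal illustration:

    import re

    CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    line = "# coding=koi8-r"
    assert CODING_RE.match(line)    # what the tests assert after this change
    assert CODING_RE.search(line)   # equivalent here: '^' pins search to the start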
Lib/test/test_tokenize.py

@@ -946,6 +946,13 @@ class TestDetectEncoding(TestCase):
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)

+    def test_false_encoding(self):
+        # Issue 18873: "Encoding" detected in non-comment lines
+        readline = self.get_readline((b'print("#coding=fake")',))
+        encoding, consumed_lines = detect_encoding(readline)
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
     def test_open(self):
         filename = support.TESTFN + '.py'
         self.addCleanup(support.unlink, filename)
Lib/tokenize.py

@@ -31,7 +31,7 @@ from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

 import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",

@@ -372,10 +372,10 @@ def detect_encoding(readline):
             msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)

-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
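The observable effect at the tokenize API level: a first line like print("#coding=fake") used to send detect_encoding through a lookup of the bogus codec 'fake', ending in SyntaxError; with the anchored match it is ignored and the UTF-8 default applies, exactly as the new test asserts. A small demo:

    import io
    import tokenize

    src = b'print("#coding=fake")'
    encoding, consumed = tokenize.detect_encoding(io.BytesIO(src).readline)
    print(encoding)   # 'utf-8'
    print(consumed)   # [b'print("#coding=fake")']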
Misc/NEWS

@@ -68,6 +68,8 @@ Core and Builtins
 Library
 -------

+- Issue #18873: The tokenize module now detects Python source code encoding
+  only in comment lines.
+
 - Issue #17324: Fix http.server's request handling case on trailing '/'. Patch
   contributed by Vajrasky Kok.

@@ -304,6 +306,9 @@ C API
 IDLE
 ----

+- Issue #18873: IDLE now detects Python source code encoding only in comment
+  lines.
+
 - Issue #18988: The "Tab" key now works when a word is already autocompleted.

 - Issue #18489: Add tests for SearchEngine. Original patch by Phil Webster.

@@ -430,6 +435,9 @@ Documentation
 Tools/Demos
 -----------

+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+  code encoding only in comment lines.
+
 - Issue #18817: Fix a resource warning in Lib/aifc.py demo.

 - Issue #18439: Make patchcheck work on Windows for ACKS, NEWS.
Tools/scripts/findnocoding.py

@@ -32,13 +32,13 @@ except ImportError:
           "no sophisticated Python source file search will be done.", file=sys.stderr)


-decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

 def get_declaration(line):
-    match = decl_re.search(line)
+    match = decl_re.match(line)
     if match:
         return match.group(1)
-    return ''
+    return b''

 def has_correct_encoding(text, codec):
     try:
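The patched get_declaration is self-contained enough to try directly. Note two details: the bytes pattern carries no re.ASCII flag because bytes patterns are ASCII-only by definition, and the fallback return value changes to b'' so it matches the bytes group returned on success. A quick sanity check using the code from the hunk plus illustrative prints:

    import re

    decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

    def get_declaration(line):
        # Return the declared codec name, or b'' if the line is not a coding comment.
        match = decl_re.match(line)
        if match:
            return match.group(1)
        return b''

    print(get_declaration(b"# -*- coding: latin-1 -*-\n"))  # b'latin-1'
    print(get_declaration(b"print '#coding=0'\n"))          # b''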