mirror of https://github.com/python/cpython
Issue #16979: Fix error handling bugs in the unicode-escape-decode decoder.
commit 8fe5a9f9c3
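The change makes the unicode-escape decoder report accurate error positions and apply the registered error handler to the malformed escape sequence itself, instead of mis-reporting the range. A minimal sketch of the behaviour the new tests below assert, using only codecs.unicode_escape_decode calls that appear in the diff (the commented results mirror the test expectations, not independently verified output):

    import codecs

    # An escape naming an out-of-range code point (> U+10FFFF).
    bad = br"\U00110000"

    # Strict mode raises, as the new test_decode_errors asserts.
    try:
        codecs.unicode_escape_decode(bad)
    except UnicodeDecodeError as exc:
        print("strict:", exc.reason)

    # The handler is applied to the whole escape sequence: "ignore" drops it,
    # "replace" substitutes a single U+FFFD, and all 10 input bytes are consumed.
    print(codecs.unicode_escape_decode(bad, "ignore"))   # ('', 10) per the tests
    print(codecs.unicode_escape_decode(bad, "replace"))  # ('\ufffd', 10) per the tests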
Lib/test/test_codeccallbacks.py
@@ -271,12 +271,12 @@ class CodecCallbackTest(unittest.TestCase):

        self.assertEqual(
            b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1"),
            "\u3042[<92><117><51><120>]xx"
            "\u3042[<92><117><51>]xxx"
        )

        self.assertEqual(
            b"\\u3042\u3xx".decode("unicode-escape", "test.handler1"),
            "\u3042[<92><117><51><120><120>]"
            "\u3042[<92><117><51>]xx"
        )

        self.assertEqual(
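The updated expectations in this hunk reflect the narrower error range reported after the fix: for the malformed escape in b"\\u3042\u3xxx", the handler now sees only the bytes \, u, 3 (decimal 92, 117, 51) rather than \, u, 3, x, so the trailing "xxx" is decoded normally. The test relies on the suite's "test.handler1"; the sketch below uses a hypothetical handler of the same shape (decimal byte values in angle brackets) to illustrate the change, not the suite's actual helper:

    import codecs

    def show_bytes(exc):
        # Hypothetical stand-in for "test.handler1": render the offending
        # bytes as <decimal> and resume decoding right after them.
        if not isinstance(exc, UnicodeDecodeError):
            raise exc
        bad = "".join("<%d>" % b for b in exc.object[exc.start:exc.end])
        return ("[%s]" % bad, exc.end)

    codecs.register_error("demo.show_bytes", show_bytes)

    # Same bytes as the test input b"\\u3042\u3xxx" (\u is not an escape in a
    # bytes literal, so the second escape is backslash, u, 3, x, x, x).
    data = b"\\u3042\\u3xxx"
    print(data.decode("unicode-escape", "demo.show_bytes"))
    # With the fix the expected result is "\u3042[<92><117><51>]xxx".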
Lib/test/test_codecs.py
@@ -21,6 +21,11 @@ except ImportError:
else:
    SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)

def coding_checker(self, coder):
    def check(input, expect):
        self.assertEqual(coder(input), (expect, len(input)))
    return check

class Queue(object):
    """
    queue: write bytes at one end, read bytes from the other end
@@ -2009,6 +2014,85 @@ class TypesTest(unittest.TestCase):
        self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))


class UnicodeEscapeTest(unittest.TestCase):
    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\8]", r"[\8]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567xuUN':
                check(b'\\' + bytes([b]), '\\' + chr(b))

    def test_decode_errors(self):
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))


class SurrogateEscapeTest(unittest.TestCase):

    def test_utf8(self):
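The new test_decode_errors above drives truncated \x, \u and \U escapes through the strict, "ignore" and "replace" handlers. A small usage sketch for one of those cases (a \u escape cut off after two hex digits, once bracketed and once at the end of the input), with the expected tuples taken from the assertions above:

    import codecs

    decode = codecs.unicode_escape_decode

    # b"[\u00]\u00": the first \u escape is cut short by "]", the second by
    # the end of the input; this is the c=b'u', i=2 case of the test loop.
    data = b"[\\u00]\\u00"

    try:
        decode(data)                # strict mode raises UnicodeDecodeError
    except UnicodeDecodeError as exc:
        print("strict:", exc.start, exc.end, exc.reason)

    print(decode(data, "ignore"))   # ('[]', 10) per the test
    print(decode(data, "replace"))  # ('[\ufffd]\ufffd', 10) per the test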
Misc/NEWS
@@ -234,6 +234,8 @@ Core and Builtins
Library
-------

- Issue #16979: Fix error handling bugs in the unicode-escape-decode decoder.

Have py_compile use importlib as much as possible to avoid code duplication.

- Issue #180022: Have site.addpackage() consider already known paths even when
Objects/unicodeobject.c
@@ -5378,7 +5378,6 @@ PyUnicode_DecodeUnicodeEscape(const char *s,
    const char *starts = s;
    Py_ssize_t startinpos;
    Py_ssize_t endinpos;
    int j;
    _PyUnicodeWriter writer;
    const char *end;
    char* message;
@@ -5500,28 +5499,19 @@ PyUnicode_DecodeUnicodeEscape(const char *s,
            message = "truncated \\UXXXXXXXX escape";
      hexescape:
            chr = 0;
            if (s+digits>end) {
                endinpos = size;
                if (unicode_decode_call_errorhandler_writer(
                        errors, &errorHandler,
                        "unicodeescape", "end of string in escape sequence",
                        &starts, &end, &startinpos, &endinpos, &exc, &s,
                        &writer))
                    goto onError;
                goto nextByte;
            }
            for (j = 0; j < digits; ++j) {
                c = (unsigned char) s[j];
                if (!Py_ISXDIGIT(c)) {
                    endinpos = (s+j+1)-starts;
                    if (unicode_decode_call_errorhandler_writer(
                            errors, &errorHandler,
                            "unicodeescape", message,
                            &starts, &end, &startinpos, &endinpos, &exc, &s,
                            &writer))
                        goto onError;
                    goto nextByte;
            if (end - s < digits) {
                /* count only hex digits */
                for (; s < end; ++s) {
                    c = (unsigned char)*s;
                    if (!Py_ISXDIGIT(c))
                        goto error;
                }
                goto error;
            }
            for (; digits--; ++s) {
                c = (unsigned char)*s;
                if (!Py_ISXDIGIT(c))
                    goto error;
                chr = (chr<<4) & ~0xF;
                if (c >= '0' && c <= '9')
                    chr += c - '0';
@@ -5530,24 +5520,16 @@ PyUnicode_DecodeUnicodeEscape(const char *s,
                else
                    chr += 10 + c - 'A';
            }
            s += j;
            if (chr == 0xffffffff && PyErr_Occurred())
                /* _decoding_error will have already written into the
                   target buffer. */
                break;
      store:
            /* when we get here, chr is a 32-bit unicode character */
            if (chr <= MAX_UNICODE) {
                WRITECHAR(chr);
            } else {
                endinpos = s-starts;
                if (unicode_decode_call_errorhandler_writer(
                        errors, &errorHandler,
                        "unicodeescape", "illegal Unicode character",
                        &starts, &end, &startinpos, &endinpos, &exc, &s,
                        &writer))
                    goto onError;
            }
            message = "illegal Unicode character";
            if (chr > MAX_UNICODE)
                goto error;
            WRITECHAR(chr);
            break;

            /* \N{name} */
@@ -5575,26 +5557,13 @@ PyUnicode_DecodeUnicodeEscape(const char *s,
                    goto store;
                }
            }
            endinpos = s-starts;
            if (unicode_decode_call_errorhandler_writer(
                    errors, &errorHandler,
                    "unicodeescape", message,
                    &starts, &end, &startinpos, &endinpos, &exc, &s,
                    &writer))
                goto onError;
            break;
            goto error;

        default:
            if (s > end) {
                message = "\\ at end of string";
                s--;
                endinpos = s-starts;
                if (unicode_decode_call_errorhandler_writer(
                        errors, &errorHandler,
                        "unicodeescape", message,
                        &starts, &end, &startinpos, &endinpos, &exc, &s,
                        &writer))
                    goto onError;
                goto error;
            }
            else {
                WRITECHAR('\\');
@@ -5602,8 +5571,17 @@ PyUnicode_DecodeUnicodeEscape(const char *s,
            }
            break;
        }
      nextByte:
        ;
        continue;

      error:
        endinpos = s-starts;
        if (unicode_decode_call_errorhandler_writer(
                errors, &errorHandler,
                "unicodeescape", message,
                &starts, &end, &startinpos, &endinpos, &exc, &s,
                &writer))
            goto onError;
        continue;
    }
#undef WRITECHAR