Merged revisions 78603 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

........
  r78603 | victor.stinner | 2010-03-03 00:20:02 +0100 (mer., 03 mars 2010) | 5 lines

  Issue #7820: The parser tokenizer restores all bytes in the right order if the BOM
  check fails.

  Fix an assertion in pydebug mode.
........
This commit is contained in:
Victor Stinner 2010-03-21 13:09:24 +00:00
parent a0dc275f08
commit 0217c958f2
3 changed files with 50 additions and 16 deletions

View File

@ -30,6 +30,17 @@ class PEP263Test(unittest.TestCase):
self.assertEqual(d['a'], d['b'])
self.assertEqual(len(d['a']), len(d['b']))
def test_issue7820(self):
    """Regression test for issue #7820.

    The tokenizer's check_bom() must push back every byte it consumed,
    in the right order, when the buffer merely begins with the first
    byte(s) of a valid BOM but the following bytes differ (this used to
    trip an assertion in pydebug builds).
    """
    # One byte in common with the UTF-16-LE BOM (FF FE).
    self.assertRaises(SyntaxError, eval, '\xff\x20')
    # Two bytes in common with the UTF-8 BOM (EF BB BF).
    self.assertRaises(SyntaxError, eval, '\xef\xbb\x20')
def test_main():
    """Entry point used by regrtest: run the PEP 263 tests."""
    test_support.run_unittest(PEP263Test)

View File

@ -12,6 +12,9 @@ What's New in Python 2.6.6 alpha 1?
Core and Builtins
-----------------
- Issue #7820: The parser tokenizer restores all bytes in the right order if
  the BOM check fails.
Library
-------

View File

@ -304,37 +304,57 @@ check_bom(int get_char(struct tok_state *),
int set_readline(struct tok_state *, const char *),
struct tok_state *tok)
{
int ch = get_char(tok);
int ch1, ch2, ch3;
ch1 = get_char(tok);
tok->decoding_state = 1;
if (ch == EOF) {
if (ch1 == EOF) {
return 1;
} else if (ch == 0xEF) {
ch = get_char(tok); if (ch != 0xBB) goto NON_BOM;
ch = get_char(tok); if (ch != 0xBF) goto NON_BOM;
} else if (ch1 == 0xEF) {
ch2 = get_char(tok);
if (ch2 != 0xBB) {
unget_char(ch2, tok);
unget_char(ch1, tok);
return 1;
}
ch3 = get_char(tok);
if (ch3 != 0xBF) {
unget_char(ch3, tok);
unget_char(ch2, tok);
unget_char(ch1, tok);
return 1;
}
#if 0
/* Disable support for UTF-16 BOMs until a decision
is made whether this needs to be supported. */
} else if (ch == 0xFE) {
ch = get_char(tok); if (ch != 0xFF) goto NON_BOM;
if (!set_readline(tok, "utf-16-be")) return 0;
} else if (ch1 == 0xFE) {
ch2 = get_char(tok);
if (ch2 != 0xFF) {
unget_char(ch2, tok);
unget_char(ch1, tok);
return 1;
}
if (!set_readline(tok, "utf-16-be"))
return 0;
tok->decoding_state = -1;
} else if (ch == 0xFF) {
ch = get_char(tok); if (ch != 0xFE) goto NON_BOM;
if (!set_readline(tok, "utf-16-le")) return 0;
} else if (ch1 == 0xFF) {
ch2 = get_char(tok);
if (ch2 != 0xFE) {
unget_char(ch2, tok);
unget_char(ch1, tok);
return 1;
}
if (!set_readline(tok, "utf-16-le"))
return 0;
tok->decoding_state = -1;
#endif
} else {
unget_char(ch, tok);
unget_char(ch1, tok);
return 1;
}
if (tok->encoding != NULL)
PyMem_FREE(tok->encoding);
tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */
return 1;
NON_BOM:
/* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
unget_char(0xFF, tok); /* XXX this will cause a syntax error */
return 1;
}
/* Read a line of text from TOK into S, using the stream in TOK.