mirror of https://github.com/python/cpython
bpo-45408: Don't override previous tokenizer errors in the second parser pass (GH-28812)
This commit is contained in:
parent 6811fdaec8
commit 0219017df7
@@ -1075,6 +1075,14 @@ Module(
         with self.assertRaisesRegex(ValueError, msg):
             ast.literal_eval(node)
 
+    def test_literal_eval_syntax_errors(self):
+        msg = "unexpected character after line continuation character"
+        with self.assertRaisesRegex(SyntaxError, msg):
+            ast.literal_eval(r'''
+                \
+                (\
+            \ ''')
+
     def test_bad_integer(self):
         # issue13436: Bad error message with invalid numeric values
         body = [ast.ImportFrom(module='time',
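The new test in Lib/test/test_ast.py reproduces the crash scenario directly: line continuations inside an unclosed parenthesis, ending in a backslash followed by a space, which the tokenizer rejects. A standalone sketch of the same check, assuming an interpreter that includes this fix:

import ast

# Simplified variant of the test's input: a line continuation, an unclosed
# parenthesis, then a backslash followed by a space.
source = '\\\n(\\\n\\ '
try:
    ast.literal_eval(source)
except SyntaxError as exc:
    # With the fix this is a plain SyntaxError rather than a crash:
    # "unexpected character after line continuation character"
    print(exc.msg)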
@@ -223,7 +223,7 @@ class ExceptionTests(unittest.TestCase):
         check('x = "a', 1, 5)
         check('lambda x: x = 2', 1, 1)
         check('f{a + b + c}', 1, 1)
-        check('[file for str(file) in []\n])', 2, 2)
+        check('[file for str(file) in []\n])', 1, 11)
         check('a = « hello » « world »', 1, 5)
         check('[\nfile\nfor str(file)\nin\n[]\n]', 3, 5)
         check('[file for\n str(file) in []]', 2, 2)
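The updated expectation in Lib/test/test_exceptions.py reflects the new error attribution: since tokenizer errors no longer override the parser's own report here, the SyntaxError points at the invalid comprehension target str(file) on line 1, column 11, instead of at line 2. A hedged stand-in for the test's check() helper, assuming a build with this commit:

# check() asserts the SyntaxError's coordinates; this mirrors that assertion.
try:
    compile('[file for str(file) in []\n])', '<test>', 'exec')
except SyntaxError as exc:
    print(exc.lineno, exc.offset)  # expected with the fix: 1 11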
@@ -0,0 +1,2 @@
+Fix a crash in the parser when reporting tokenizer errors that occur at the
+same time unclosed parentheses are detected. Patch by Pablo Galindo.
@@ -1342,13 +1342,16 @@ _PyPegen_run_parser(Parser *p)
 {
     void *res = _PyPegen_parse(p);
     if (res == NULL) {
+        if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_SyntaxError)) {
+            return NULL;
+        }
         Token *last_token = p->tokens[p->fill - 1];
         reset_parser_state(p);
         _PyPegen_parse(p);
         if (PyErr_Occurred()) {
             // Prioritize tokenizer errors to custom syntax errors raised
             // on the second phase only if the errors come from the parser.
-            if (p->tok->done != E_ERROR && PyErr_ExceptionMatches(PyExc_SyntaxError)) {
+            if (p->tok->done == E_DONE && PyErr_ExceptionMatches(PyExc_SyntaxError)) {
                 _PyPegen_check_tokenizer_errors(p);
             }
             return NULL;
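Two changes in Parser/pegen.c interact here: a non-SyntaxError from the first pass (for example a MemoryError) now returns immediately instead of triggering a second parse that could clobber it, and the tokenizer re-check now requires the tokenizer to have finished cleanly (done == E_DONE) rather than merely not being in the error state (done != E_ERROR). A minimal Python model of that control flow, with hypothetical names standing in for the C API:

E_DONE, E_ERROR = 'E_DONE', 'E_ERROR'  # illustrative tokenizer states

def run_parser(first_pass, second_pass, tok_done, check_tokenizer_errors):
    """Each pass returns (result, error); error is None on success."""
    res, err = first_pass()
    if res is None:
        # New early exit: never re-parse over a non-SyntaxError.
        if err is not None and not isinstance(err, SyntaxError):
            return None, err
        res, err = second_pass()  # second pass with error-reporting rules
        if err is not None:
            # Let a tokenizer error replace a parser-raised SyntaxError
            # only when the tokenizer itself ended without incident.
            if tok_done == E_DONE and isinstance(err, SyntaxError):
                err = check_tokenizer_errors() or err
            return None, err
    return res, err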