Use parser's token stack to unparse the annotations

This commit is contained in:
Batuhan Taskaya 2020-10-07 22:24:50 +03:00 committed by Pablo Galindo
parent 19e814bcee
commit 3f98f369d4
No known key found for this signature in database
GPG Key ID: FFE87404168BD847
6 changed files with 69 additions and 39 deletions

View File

@ -1,3 +1,4 @@
import ast
import unittest
import sys
from textwrap import dedent
@ -34,17 +35,14 @@ class PostponedAnnotationsTestCase(unittest.TestCase):
self.assertEqual(func_ret_ann, var_ann2)
return func_ret_ann
def assertAnnotationEqual(
self, annotation, expected=None, drop_parens=False, is_tuple=False,
):
actual = self.getActual(annotation)
if expected is None:
expected = annotation if not is_tuple else annotation[1:-1]
if drop_parens:
self.assertNotEqual(actual, expected)
actual = actual.replace("(", "").replace(")", "")
self.assertEqual(actual, expected)
def assertAnnotationEqual(self, annotation, expected=None):
    """Assert that unparsing *annotation* reproduces *expected*.

    When *expected* is None the annotation is expected to round-trip
    to itself.  The comparison is done on the dumps of the parsed
    ASTs, so cosmetic whitespace/parenthesis differences between the
    unparsed text and *expected* do not cause false failures.
    """
    if expected is None:
        expected = annotation
    with self.subTest(annotation=annotation, expected=expected):
        actual = self.getActual(annotation)
        # PEP 8: no spaces around '=' in keyword arguments; split the
        # overlong assertion across lines for readability.
        self.assertEqual(
            ast.dump(ast.parse(actual)),
            ast.dump(ast.parse(expected)),
            msg=f"{expected} != {actual}",
        )
def test_annotations(self):
eq = self.assertAnnotationEqual
@ -216,10 +214,10 @@ class PostponedAnnotationsTestCase(unittest.TestCase):
self.assertAnnotationEqual("1e1000j")
self.assertAnnotationEqual("-1e1000")
self.assertAnnotationEqual("3+1e1000j")
self.assertAnnotationEqual("(1e1000, 1e1000j)")
self.assertAnnotationEqual("(1e1000,1e1000j)")
self.assertAnnotationEqual("'inf'")
self.assertAnnotationEqual("('inf', 1e1000, 'infxxx', 1e1000j)")
self.assertAnnotationEqual("(1e1000, (1e1000j,))")
self.assertAnnotationEqual("('inf',1e1000,'infxxx',1e1000j)")
self.assertAnnotationEqual("(1e1000,(1e1000j,))")
if __name__ == "__main__":

View File

@ -333,7 +333,6 @@ PYTHON_OBJS= \
Python/asdl.o \
Python/ast.o \
Python/ast_opt.o \
Python/ast_unparse.o \
Python/bltinmodule.o \
Python/ceval.o \
Python/codecs.o \
@ -896,7 +895,7 @@ regen-keyword:
$(srcdir)/Lib/keyword.py.new
$(UPDATE_FILE) $(srcdir)/Lib/keyword.py $(srcdir)/Lib/keyword.py.new
Python/compile.o Python/symtable.o Python/ast_unparse.o Python/ast.o Python/future.o: $(srcdir)/Include/Python-ast.h
Python/compile.o Python/symtable.o Python/ast.o Python/future.o: $(srcdir)/Include/Python-ast.h
Python/getplatform.o: $(srcdir)/Python/getplatform.c
$(CC) -c $(PY_CORE_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c

View File

@ -436,7 +436,6 @@
<ClCompile Include="..\Python\asdl.c" />
<ClCompile Include="..\Python\ast.c" />
<ClCompile Include="..\Python\ast_opt.c" />
<ClCompile Include="..\Python\ast_unparse.c" />
<ClCompile Include="..\Python\bltinmodule.c" />
<ClCompile Include="..\Python\bootstrap_hash.c" />
<ClCompile Include="..\Python\ceval.c" />

View File

@ -962,9 +962,6 @@
<ClCompile Include="..\Python\ast_opt.c">
<Filter>Python</Filter>
</ClCompile>
<ClCompile Include="..\Python\ast_unparse.c">
<Filter>Python</Filter>
</ClCompile>
<ClCompile Include="..\Python\bltinmodule.c">
<Filter>Python</Filter>
</ClCompile>

View File

@ -2282,32 +2282,69 @@ expr_ty _PyPegen_collect_call_seqs(Parser *p, asdl_expr_seq *a, asdl_seq *b,
col_offset, end_lineno, end_col_offset, arena);
}
#define IS_KEYWORD(token) token->type > NT_OFFSET || token->type == AWAIT ||\
token->type == ASYNC
expr_ty _PyPegen_produce_string(Parser *p, expr_ty a) {
Py_ssize_t left = a->col_offset;
Py_ssize_t right = a->end_col_offset;
Py_ssize_t index = left;
Py_ssize_t parens = 0;
while (p->tok->buf[index] != ':' && p->tok->buf[index] != '>') {
index--;
if (p->tok->buf[index] == '(') {
parens++;
left = index;
int start = -1, end = -1, length = 0, seen_walrus = 0, mark;
for (mark = p->mark; start == -1; --mark) {
Token* token = p->tokens[mark];
if (token->end_lineno == a->end_lineno
&& token->end_col_offset == a->end_col_offset)
{
end = mark;
}
if (token->lineno == a->lineno
&& token->col_offset == a->col_offset)
{
start = mark;
}
if (end != -1) {
length = length + PyBytes_GET_SIZE(token->bytes);
if (IS_KEYWORD(token) || token->type == NUMBER) {
length = length + 2;
} else if (token->type == COLONEQUAL) {
seen_walrus = 1;
}
}
}
index = right;
while (parens != 0) {
if (p->tok->buf[index] == ')') {
parens--;
right = index + 1;
assert(start != -1 || end != -1);
char* buffer = PyMem_Calloc(length + seen_walrus * 2, sizeof(char*));
for (mark = start; mark <= end; ++mark) {
int is_start = mark == start;
Token* token = p->tokens[mark];
char *value = PyBytes_AS_STRING(token->bytes);
if (IS_KEYWORD(token) || token->type == NUMBER) {
const char *format;
char new_value[strlen(value) + 2];
if (is_start) {
format = "%s ";
} else {
format = " %s ";
}
value = PyMem_Malloc(sprintf(new_value, format, value) + 1);
strcpy(value, new_value);
}
if (mark == start) {
if (seen_walrus) {
strcpy(buffer, "(");
strcat(buffer, value);
} else {
strcpy(buffer, value);
}
} else {
strcat(buffer, value);
}
index++;
}
PyObject *res= PyUnicode_DecodeUTF8(p->tok->buf+left, right-left, NULL);
if (seen_walrus) {
strcat(buffer, ")");
}
PyObject *res = PyUnicode_FromString(buffer);
PyMem_Free(buffer);
if (res == NULL) {
return NULL;
}
if (PyArena_AddPyObject(p->arena, res) < 0) {
Py_DECREF(res);
return NULL;

View File

@ -2047,7 +2047,7 @@ compiler_visit_argannotation(struct compiler *c, identifier id,
ADDOP_LOAD_CONST(c, mangled);
Py_DECREF(mangled);
VISIT(c, annexpr, annotation);
VISIT(c, expr, annotation);
*annotations_len += 2;
}
return 1;