/* Parser implementation */

/* For a description, see the comments at end of this file */

/* XXX To do: error recovery */

#include "Python.h"
#include "token.h"
#include "grammar.h"
#include "node.h"
#include "parser.h"
#include "errcode.h"
#include "graminit.h"


#ifdef Py_DEBUG
extern int Py_DebugFlag;
#define D(x) if (!Py_DebugFlag); else x
#else
#define D(x)
#endif


/* STACK DATA TYPE */

static void s_reset(stack *);

static void
s_reset(stack *s)
{
    s->s_top = &s->s_base[MAXSTACK];
}

#define s_empty(s) ((s)->s_top == &(s)->s_base[MAXSTACK])

static int
s_push(stack *s, const dfa *d, node *parent)
{
    stackentry *top;
    if (s->s_top == s->s_base) {
        fprintf(stderr, "s_push: parser stack overflow\n");
        return E_NOMEM;
    }
    top = --s->s_top;
    top->s_dfa = d;
    top->s_parent = parent;
    top->s_state = 0;
    return 0;
}

#ifdef Py_DEBUG

static void
s_pop(stack *s)
{
    if (s_empty(s)) {
        Py_FatalError("parser stack underflow");
    }
    s->s_top++;
}

#else /* !Py_DEBUG */

#define s_pop(s) (s)->s_top++

#endif


/* PARSER CREATION */

parser_state *
PyParser_New(grammar *g, int start)
{
    parser_state *ps;

    if (!g->g_accel)
        PyGrammar_AddAccelerators(g);
    ps = (parser_state *)PyMem_MALLOC(sizeof(parser_state));
    if (ps == NULL)
        return NULL;
    ps->p_grammar = g;
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
    ps->p_flags = 0;
#endif
    ps->p_tree = PyNode_New(start);
    if (ps->p_tree == NULL) {
        PyMem_FREE(ps);
        return NULL;
    }
    s_reset(&ps->p_stack);
    (void) s_push(&ps->p_stack, PyGrammar_FindDFA(g, start), ps->p_tree);
    return ps;
}

void
PyParser_Delete(parser_state *ps)
{
    /* NB If you want to save the parse tree,
       you must set p_tree to NULL before calling delparser! */
    PyNode_Free(ps->p_tree);
    PyMem_FREE(ps);
}
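
/*
   Example (an illustrative sketch, not part of the original code): the
   comment in PyParser_Delete() above means that a caller who wants to keep
   the finished parse tree must detach it before deleting the parser.  The
   caller then owns the tree and must eventually release it with
   PyNode_Free().
*/
#if 0   /* illustration only */
static node *
take_tree_and_delete(parser_state *ps)
{
    node *tree = ps->p_tree;   /* steal the tree */
    ps->p_tree = NULL;         /* so PyParser_Delete() won't free it */
    PyParser_Delete(ps);
    return tree;               /* caller frees it with PyNode_Free() */
}
#endif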


/* PARSER STACK OPERATIONS */

static int
shift(stack *s, int type, char *str, int newstate, int lineno, int col_offset,
      int end_lineno, int end_col_offset)
{
    int err;
    assert(!s_empty(s));
    err = PyNode_AddChild(s->s_top->s_parent, type, str, lineno, col_offset,
                          end_lineno, end_col_offset);
    if (err)
        return err;
    s->s_top->s_state = newstate;
    return 0;
}

static int
push(stack *s, int type, const dfa *d, int newstate, int lineno, int col_offset,
     int end_lineno, int end_col_offset)
{
    int err;
    node *n;
    n = s->s_top->s_parent;
    assert(!s_empty(s));
    err = PyNode_AddChild(n, type, (char *)NULL, lineno, col_offset,
                          end_lineno, end_col_offset);
    if (err)
        return err;
    s->s_top->s_state = newstate;
    return s_push(s, d, CHILD(n, NCH(n)-1));
}


/* PARSER PROPER */

static int
classify(parser_state *ps, int type, const char *str)
{
    grammar *g = ps->p_grammar;
    int n = g->g_ll.ll_nlabels;

    if (type == NAME) {
        const label *l = g->g_ll.ll_label;
        int i;
        for (i = n; i > 0; i--, l++) {
            if (l->lb_type != NAME || l->lb_str == NULL ||
                l->lb_str[0] != str[0] ||
                strcmp(l->lb_str, str) != 0)
                continue;
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#if 0
            /* Leaving this in as an example */
            if (!(ps->p_flags & CO_FUTURE_WITH_STATEMENT)) {
                if (str[0] == 'w' && strcmp(str, "with") == 0)
                    break; /* not a keyword yet */
                else if (str[0] == 'a' && strcmp(str, "as") == 0)
                    break; /* not a keyword yet */
            }
#endif
#endif
            D(printf("It's a keyword\n"));
            return n - i;
        }
    }

    {
        const label *l = g->g_ll.ll_label;
        int i;
        for (i = n; i > 0; i--, l++) {
            if (l->lb_type == type && l->lb_str == NULL) {
                D(printf("It's a token we know\n"));
                return n - i;
            }
        }
    }

    D(printf("Illegal token\n"));
    return -1;
}

#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#if 0
/* Leaving this in as an example */
static void
future_hack(parser_state *ps)
{
    node *n = ps->p_stack.s_top->s_parent;
    node *ch, *cch;
    int i;

    /* from __future__ import ..., must have at least 4 children */
    n = CHILD(n, 0);
    if (NCH(n) < 4)
        return;
    ch = CHILD(n, 0);
    if (STR(ch) == NULL || strcmp(STR(ch), "from") != 0)
        return;
    ch = CHILD(n, 1);
    if (NCH(ch) == 1 && STR(CHILD(ch, 0)) &&
        strcmp(STR(CHILD(ch, 0)), "__future__") != 0)
        return;
    ch = CHILD(n, 3);
    /* ch can be a star, a parenthesis or import_as_names */
    if (TYPE(ch) == STAR)
        return;
    if (TYPE(ch) == LPAR)
        ch = CHILD(n, 4);

    for (i = 0; i < NCH(ch); i += 2) {
        cch = CHILD(ch, i);
        if (NCH(cch) >= 1 && TYPE(CHILD(cch, 0)) == NAME) {
            char *str_ch = STR(CHILD(cch, 0));
            if (strcmp(str_ch, FUTURE_WITH_STATEMENT) == 0) {
                ps->p_flags |= CO_FUTURE_WITH_STATEMENT;
            } else if (strcmp(str_ch, FUTURE_PRINT_FUNCTION) == 0) {
                ps->p_flags |= CO_FUTURE_PRINT_FUNCTION;
            } else if (strcmp(str_ch, FUTURE_UNICODE_LITERALS) == 0) {
                ps->p_flags |= CO_FUTURE_UNICODE_LITERALS;
            }
        }
    }
}
#endif
#endif /* future keyword */
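
/*
   Note on the accelerator table consulted by PyParser_AddToken() below
   (a reader's summary inferred from the code, not original commentary):
   for the current state, s_accel[ilabel - s_lower] is -1 when there is no
   transition on that label.  Otherwise, if bit 7 is set the entry means
   "push a non-terminal": bits 8 and up select the DFA to push (offset by
   NT_OFFSET) and the low 7 bits give the state in which the current DFA
   resumes once that non-terminal has been parsed.  If bit 7 is clear, the
   entry is simply the next state after shifting the terminal.
*/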

int
PyParser_AddToken(parser_state *ps, int type, char *str,
                  int lineno, int col_offset,
                  int end_lineno, int end_col_offset,
                  int *expected_ret)
{
    int ilabel;
    int err;

    D(printf("Token %s/'%s' ... ", _PyParser_TokenNames[type], str));

    /* Find out which label this token is */
    ilabel = classify(ps, type, str);
    if (ilabel < 0)
        return E_SYNTAX;

    /* Loop until the token is shifted or an error occurred */
    for (;;) {
        /* Fetch the current dfa and state */
        const dfa *d = ps->p_stack.s_top->s_dfa;
        state *s = &d->d_state[ps->p_stack.s_top->s_state];

        D(printf(" DFA '%s', state %d:",
                 d->d_name, ps->p_stack.s_top->s_state));

        /* Check accelerator */
        if (s->s_lower <= ilabel && ilabel < s->s_upper) {
            int x = s->s_accel[ilabel - s->s_lower];
            if (x != -1) {
                if (x & (1<<7)) {
                    /* Push non-terminal */
                    int nt = (x >> 8) + NT_OFFSET;
                    int arrow = x & ((1<<7)-1);
                    if (nt == func_body_suite && !(ps->p_flags & PyCF_TYPE_COMMENTS)) {
                        /* When parsing type comments is not requested,
                           we can provide better errors about bad indentation
                           by using 'suite' for the body of a funcdef */
                        D(printf(" [switch func_body_suite to suite]"));
                        nt = suite;
                    }
                    const dfa *d1 = PyGrammar_FindDFA(
                        ps->p_grammar, nt);
                    if ((err = push(&ps->p_stack, nt, d1,
                                    arrow, lineno, col_offset,
                                    end_lineno, end_col_offset)) > 0) {
                        D(printf(" MemError: push\n"));
                        return err;
                    }
                    D(printf(" Push '%s'\n", d1->d_name));
                    continue;
                }

                /* Shift the token */
                if ((err = shift(&ps->p_stack, type, str,
                                 x, lineno, col_offset,
                                 end_lineno, end_col_offset)) > 0) {
                    D(printf(" MemError: shift.\n"));
                    return err;
                }
                D(printf(" Shift.\n"));
                /* Pop while we are in an accept-only state */
                while (s = &d->d_state
                                [ps->p_stack.s_top->s_state],
                       s->s_accept && s->s_narcs == 1) {
                    D(printf(" DFA '%s', state %d: "
                             "Direct pop.\n",
                             d->d_name,
                             ps->p_stack.s_top->s_state));
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#if 0
                    if (d->d_name[0] == 'i' &&
                        strcmp(d->d_name,
                               "import_stmt") == 0)
                        future_hack(ps);
#endif
#endif
                    s_pop(&ps->p_stack);
                    if (s_empty(&ps->p_stack)) {
                        D(printf(" ACCEPT.\n"));
                        return E_DONE;
                    }
                    d = ps->p_stack.s_top->s_dfa;
                }
                return E_OK;
            }
        }

        if (s->s_accept) {
#ifdef PY_PARSER_REQUIRES_FUTURE_KEYWORD
#if 0
            if (d->d_name[0] == 'i' &&
                strcmp(d->d_name, "import_stmt") == 0)
                future_hack(ps);
#endif
#endif
            /* Pop this dfa and try again */
            s_pop(&ps->p_stack);
            D(printf(" Pop ...\n"));
            if (s_empty(&ps->p_stack)) {
                D(printf(" Error: bottom of stack.\n"));
                return E_SYNTAX;
            }
            continue;
        }

        /* Stuck, report syntax error */
        D(printf(" Error.\n"));
        if (expected_ret) {
            if (s->s_lower == s->s_upper - 1) {
                /* Only one possible expected token */
                *expected_ret = ps->p_grammar->
                    g_ll.ll_label[s->s_lower].lb_type;
            }
            else
                *expected_ret = -1;
        }
        return E_SYNTAX;
    }
}


#ifdef Py_DEBUG

/* DEBUG OUTPUT */

void
dumptree(grammar *g, node *n)
{
    int i;

    if (n == NULL)
        printf("NIL");
    else {
        label l;
        l.lb_type = TYPE(n);
        l.lb_str = STR(n);
        printf("%s", PyGrammar_LabelRepr(&l));
        if (ISNONTERMINAL(TYPE(n))) {
            printf("(");
            for (i = 0; i < NCH(n); i++) {
                if (i > 0)
                    printf(",");
                dumptree(g, CHILD(n, i));
            }
            printf(")");
        }
    }
}

void
showtree(grammar *g, node *n)
{
    int i;

    if (n == NULL)
        return;
    if (ISNONTERMINAL(TYPE(n))) {
        for (i = 0; i < NCH(n); i++)
            showtree(g, CHILD(n, i));
    }
    else if (ISTERMINAL(TYPE(n))) {
        printf("%s", _PyParser_TokenNames[TYPE(n)]);
        if (TYPE(n) == NUMBER || TYPE(n) == NAME)
            printf("(%s)", STR(n));
        printf(" ");
    }
    else
        printf("? ");
}

void
printtree(parser_state *ps)
{
    if (Py_DebugFlag) {
        printf("Parse tree:\n");
        dumptree(ps->p_grammar, ps->p_tree);
        printf("\n");
        printf("Tokens:\n");
        showtree(ps->p_grammar, ps->p_tree);
        printf("\n");
    }
    printf("Listing:\n");
    PyNode_ListTree(ps->p_tree);
    printf("\n");
}

#endif /* Py_DEBUG */

/*

Description
-----------

The parser's interface is different than usual: the function addtoken()
must be called for each token in the input.  This makes it possible to
turn it into an incremental parsing system later.  The parsing system
constructs a parse tree as it goes.

A parsing rule is represented as a Deterministic Finite-state Automaton
(DFA).  A node in a DFA represents a state of the parser; an arc represents
a transition.  Transitions are either labeled with terminal symbols or
with non-terminals.  When the parser decides to follow an arc labeled
with a non-terminal, it is invoked recursively with the DFA representing
the parsing rule for that as its initial state; when that DFA accepts,
the parser that invoked it continues.  The parse tree constructed by the
recursively called parser is inserted as a child in the current parse tree.

The DFA's can be constructed automatically from a more conventional
language description.  An extended LL(1) grammar (ELL(1)) is suitable.
Certain restrictions make the parser's life easier: rules that can produce
the empty string should be outlawed (there are other ways to put loops
or optional parts in the language).  To avoid the need to construct
FIRST sets, we can require that all but the last alternative of a rule
(really: arc going out of a DFA's state) must begin with a terminal
symbol.

As an example, consider this grammar:

expr: term (OP term)*
term: CONSTANT | '(' expr ')'

The DFA corresponding to the rule for expr is:

    ------->.---term-->.------->
            ^          |
            |          |
            \----OP----/

The parse tree generated for the input a+b is:

(expr: (term: (NAME: a)), (OP: +), (term: (NAME: b)))

*/
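
/*
   A minimal usage sketch (not part of the original file): per the
   description above, PyParser_AddToken() is called once per token until it
   returns E_DONE.  The token stream below is hard-coded for illustration;
   a real caller would take tokens from the tokenizer.  The grammar object
   _PyParser_Grammar and the start symbol file_input (from graminit.h) are
   assumed to be available as they are elsewhere in CPython, and token
   strings are assumed to be heap copies because the tree adopts them and
   frees them with PyObject_FREE.
*/
#if 0   /* illustration only */
static node *
parse_pass_statement(void)
{
    extern grammar _PyParser_Grammar;    /* generated grammar tables */
    static const struct { int type; const char *text; } toks[] = {
        {NAME, "pass"}, {NEWLINE, "\n"}, {ENDMARKER, ""},
    };
    parser_state *ps;
    node *tree;
    size_t i;

    ps = PyParser_New(&_PyParser_Grammar, file_input);
    if (ps == NULL)
        return NULL;
    for (i = 0; i < sizeof(toks) / sizeof(toks[0]); i++) {
        char *str = (char *)PyObject_MALLOC(strlen(toks[i].text) + 1);
        int expected = -1;
        int err;
        if (str == NULL) {
            PyParser_Delete(ps);
            return NULL;
        }
        strcpy(str, toks[i].text);
        err = PyParser_AddToken(ps, toks[i].type, str,
                                1, 0, 1, (int)strlen(str), &expected);
        if (err != E_OK && err != E_DONE) {
            PyObject_FREE(str);          /* token was not added to the tree */
            PyParser_Delete(ps);         /* frees the partial tree as well */
            return NULL;
        }
    }
    tree = ps->p_tree;                   /* detach before deleting (see above) */
    ps->p_tree = NULL;
    PyParser_Delete(ps);
    return tree;                         /* release later with PyNode_Free() */
}
#endif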