[3.13] gh-119118: Fix performance regression in tokenize module (GH-119615) (#119682)

- Cache the line object to avoid creating a new Unicode object
  for every token on the same line.
- Speed up byte offset to column offset conversion by using the
  smallest buffer possible to measure the difference (a sketch of the
  affected workload follows).
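
For context, here is a minimal sketch of the kind of workload that exposed the regression. It uses only the public tokenize API; nothing in it is specific to this patch:

# One very long line: before this fix, every token on it re-decoded the
# line and rescanned it from the start to compute column offsets, so
# tokenization degraded sharply as the line grew.
import io
import tokenize

src = "x = [" + ", ".join(str(i) for i in range(10_000)) + "]\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    pass  # each TokenInfo carries .line plus (row, col) start/end pairs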

(cherry picked from commit d87b015106)

Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Authored by Miss Islington (bot) on 2024-05-28 22:47:45 +02:00; committed by GitHub.
parent c0e9961798
commit 0d0be6b3ef
4 changed files with 68 additions and 4 deletions

Misc/NEWS.d/next/Core and Builtins/….rst (new file)

@@ -0,0 +1,2 @@
+Fix performance regression in the :mod:`tokenize` module by caching the ``line``
+token attribute and calculating the column offset more efficiently.

Parser/pegen.c

@@ -18,6 +18,31 @@ _PyPegen_interactive_exit(Parser *p)
     return NULL;
 }
 
+Py_ssize_t
+_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
+{
+    const char *data = PyUnicode_AsUTF8(line);
+
+    Py_ssize_t len = 0;
+    while (col_offset < end_col_offset) {
+        Py_UCS4 ch = data[col_offset];
+        if (ch < 0x80) {
+            col_offset += 1;
+        } else if ((ch & 0xe0) == 0xc0) {
+            col_offset += 2;
+        } else if ((ch & 0xf0) == 0xe0) {
+            col_offset += 3;
+        } else if ((ch & 0xf8) == 0xf0) {
+            col_offset += 4;
+        } else {
+            PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
+            return -1;
+        }
+        len++;
+    }
+    return len;
+}
+
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
 {
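
The new helper counts characters by classifying UTF-8 lead bytes instead of decoding the text. A Python sketch of the same loop, for illustration only (char_offset_between is a hypothetical name, not a CPython API):

# Count characters between two byte offsets by stepping over UTF-8
# lead bytes, mirroring _PyPegen_byte_offset_to_character_offset_line.
def char_offset_between(data: bytes, col_offset: int, end_col_offset: int) -> int:
    length = 0
    while col_offset < end_col_offset:
        ch = data[col_offset]
        if ch < 0x80:              # 1-byte sequence (ASCII)
            col_offset += 1
        elif (ch & 0xE0) == 0xC0:  # 2-byte sequence
            col_offset += 2
        elif (ch & 0xF0) == 0xE0:  # 3-byte sequence
            col_offset += 3
        elif (ch & 0xF8) == 0xF0:  # 4-byte sequence
            col_offset += 4
        else:
            raise ValueError("Invalid UTF-8 sequence")
        length += 1
    return length

assert char_offset_between("héllo".encode(), 0, 3) == 2  # 'h' plus 2-byte 'é'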

Parser/pegen.h

@@ -148,6 +148,7 @@ int _PyPegen_fill_token(Parser *p);
 expr_ty _PyPegen_name_token(Parser *p);
 expr_ty _PyPegen_number_token(Parser *p);
 void *_PyPegen_string_token(Parser *p);
+Py_ssize_t _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset(PyObject *line, Py_ssize_t col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset_raw(const char*, Py_ssize_t col_offset);

Python/Python-tokenize.c

@@ -32,6 +32,11 @@ typedef struct
 {
     PyObject_HEAD struct tok_state *tok;
     int done;
+
+    /* Needed to cache line for performance */
+    PyObject *last_line;
+    Py_ssize_t last_lineno;
+    Py_ssize_t byte_col_offset_diff;
 } tokenizeriterobject;
 
 /*[clinic input]
@@ -68,6 +73,11 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
         self->tok->tok_extra_tokens = 1;
     }
     self->done = 0;
+
+    self->last_line = NULL;
+    self->byte_col_offset_diff = 0;
+    self->last_lineno = 0;
+
     return (PyObject *)self;
 }
@@ -210,7 +220,18 @@ tokenizeriter_next(tokenizeriterobject *it)
         if (size >= 1 && it->tok->implicit_newline) {
             size -= 1;
         }
-        line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+
+        if (it->tok->lineno != it->last_lineno) {
+            // Line has changed since last token, so we fetch the new line and cache it
+            // in the iter object.
+            Py_XDECREF(it->last_line);
+            line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+            it->last_line = line;
+            it->byte_col_offset_diff = 0;
+        } else {
+            // Line hasn't changed so we reuse the cached one.
+            line = it->last_line;
+        }
     }
     if (line == NULL) {
         Py_DECREF(str);
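
The caching pattern above, restated as a small Python sketch (the LineCache class is hypothetical, and it folds the later it->last_lineno update into the lookup for brevity):

class LineCache:
    """Decode the current line only when the line number changes."""

    def __init__(self) -> None:
        self.last_line = None
        self.last_lineno = 0

    def get(self, lineno: int, raw_line: bytes) -> str:
        if lineno != self.last_lineno:
            # New line: decode once and cache, as the C code does with
            # PyUnicode_DecodeUTF8(..., "replace").
            self.last_line = raw_line.decode("utf-8", "replace")
            self.last_lineno = lineno
        # Same line: reuse the cached str object.
        return self.last_line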
@@ -219,13 +240,28 @@ tokenizeriter_next(tokenizeriterobject *it)
     Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
     Py_ssize_t end_lineno = it->tok->lineno;
+    it->last_lineno = lineno;
+
     Py_ssize_t col_offset = -1;
     Py_ssize_t end_col_offset = -1;
+    Py_ssize_t byte_offset = -1;
     if (token.start != NULL && token.start >= line_start) {
-        col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
+        byte_offset = token.start - line_start;
+        col_offset = byte_offset - it->byte_col_offset_diff;
     }
     if (token.end != NULL && token.end >= it->tok->line_start) {
-        end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, token.end - it->tok->line_start);
+        Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
+        if (lineno == end_lineno) {
+            // If the whole token is at the same line, we can just use the token.start
+            // buffer for figuring out the new column offset, since using line is not
+            // performant for very long lines.
+            Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
+            end_col_offset = col_offset + token_col_offset;
+            it->byte_col_offset_diff += token.end - token.start - token_col_offset;
+        } else {
+            end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
+            it->byte_col_offset_diff += end_byte_offset - end_col_offset;
+        }
     }
 
     if (it->tok->tok_extra_tokens) {
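
The byte_col_offset_diff bookkeeping is the subtle part: within one line, a character column equals the byte column minus the bytes "saved" by multibyte characters already passed. A self-contained Python simulation of that accounting (the byte spans are hand-computed for this demo line; variable names mirror the C fields):

line = "s = 'héllo' + 'wörld'"
raw = line.encode("utf-8")

# Byte spans of the tokens on this line: s, =, 'héllo', +, 'wörld'.
spans = [(0, 1), (2, 3), (4, 12), (13, 14), (15, 23)]

byte_col_offset_diff = 0  # accumulated bytes-minus-characters on this line
for start, end in spans:
    col_offset = start - byte_col_offset_diff
    token_chars = len(raw[start:end].decode("utf-8"))
    end_col_offset = col_offset + token_chars
    byte_col_offset_diff += (end - start) - token_chars
    assert line[col_offset:end_col_offset] == raw[start:end].decode("utf-8")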
@@ -262,7 +298,7 @@ tokenizeriter_next(tokenizeriterobject *it)
         }
     }
 
-    result = Py_BuildValue("(iN(nn)(nn)N)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
+    result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
 exit:
     _PyToken_Free(&token);
     if (type == ENDMARKER) {
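
Note the final hunk's format change from "N" to "O": in Py_BuildValue, "N" steals a reference while "O" takes a new one, which is required now that line is a cached object the iterator keeps alive across tokens. The effect is observable from Python, though it is an implementation detail rather than a documented guarantee:

import io
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("a + b\n").readline))
line_one = [t for t in toks if t.start[0] == 1]
# With the cache, tokens on one line may share the identical str object
# for .line, where each previously got a freshly decoded copy.
print(all(t.line is line_one[0].line for t in line_one))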