gh-124064: Fix -Wconversion warnings in Parser/pegen.c (#124181)

commit 3aff1d0260 (parent ec08aa1fe4)
Author: Victor Stinner
Date: 2024-09-17 17:58:43 +02:00
Committed by: GitHub (GPG key ID: B5690EEEBB952194)
3 changed files with 9 additions and 11 deletions
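
The -Wconversion warning flags implicit conversions that may change a value's sign or magnitude: passing a signed length where strncmp(), PyMem_Malloc() or PyMem_Realloc() expect size_t, or storing strlen()'s size_t result in a signed variable. The pattern used throughout this commit is to widen the variable's type (int to Py_ssize_t) or to make the conversion explicit once the value is known to be non-negative. Below is a minimal standalone sketch of the same idea, using plain libc calls and made-up names; nothing in it comes from pegen.c itself.

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for Py_ssize_t: a signed length type. */
    typedef long ssize_like_t;

    /* Compare a keyword against a token span of signed length. Without the
     * (size_t) cast, -Wconversion warns about the implicit signed-to-unsigned
     * conversion at the strncmp() call. */
    static int
    match_keyword(const char *kw, const char *start, ssize_like_t name_len)
    {
        assert(name_len > 0);
        return strncmp(kw, start, (size_t)name_len) == 0;
    }

    /* Copy a span of signed length into a fresh NUL-terminated buffer.
     * malloc() and strncpy() take size_t, so the signed count is converted
     * explicitly after the assert guarantees it is non-negative. */
    static char *
    copy_span(const char *start, ssize_like_t len)
    {
        assert(len >= 0);
        char *tag = malloc((size_t)len + 1);
        if (tag == NULL) {
            return NULL;
        }
        strncpy(tag, start, (size_t)len);
        tag[len] = '\0';
        return tag;
    }

    int
    main(void)
    {
        const char *line = "while True:";
        char *tag = copy_span(line, 5);               /* copies "while" */
        int ok = (tag != NULL) && match_keyword("while", tag, 5);
        free(tag);
        return ok ? 0 : 1;
    }

Compiling the sketch with gcc -Wall -Wconversion should be silent; removing either (size_t) cast brings the warning back.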

Parser/pegen.c

@@ -22,7 +22,7 @@ _PyPegen_interactive_exit(Parser *p)
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
 {
-    const char *data = PyUnicode_AsUTF8(line);
+    const unsigned char *data = (const unsigned char*)PyUnicode_AsUTF8(line);
 
     Py_ssize_t len = 0;
     while (col_offset < end_col_offset) {
@@ -47,7 +47,7 @@ _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_off
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
 {
-    Py_ssize_t len = strlen(str);
+    Py_ssize_t len = (Py_ssize_t)strlen(str);
     if (col_offset > len + 1) {
         col_offset = len + 1;
     }
@@ -158,7 +158,7 @@ growable_comment_array_deallocate(growable_comment_array *arr) {
 static int
 _get_keyword_or_name_type(Parser *p, struct token *new_token)
 {
-    int name_len = new_token->end_col_offset - new_token->col_offset;
+    Py_ssize_t name_len = new_token->end_col_offset - new_token->col_offset;
     assert(name_len > 0);
 
     if (name_len >= p->n_keyword_lists ||
@@ -167,7 +167,7 @@ _get_keyword_or_name_type(Parser *p, struct token *new_token)
         return NAME;
     }
     for (KeywordToken *k = p->keywords[name_len]; k != NULL && k->type != -1; k++) {
-        if (strncmp(k->str, new_token->start, name_len) == 0) {
+        if (strncmp(k->str, new_token->start, (size_t)name_len) == 0) {
             return k->type;
         }
     }
@@ -218,7 +218,7 @@ initialize_token(Parser *p, Token *parser_token, struct token *new_token, int to
 static int
 _resize_tokens_array(Parser *p) {
     int newsize = p->size * 2;
-    Token **new_tokens = PyMem_Realloc(p->tokens, newsize * sizeof(Token *));
+    Token **new_tokens = PyMem_Realloc(p->tokens, (size_t)newsize * sizeof(Token *));
     if (new_tokens == NULL) {
         PyErr_NoMemory();
         return -1;
@@ -247,12 +247,12 @@ _PyPegen_fill_token(Parser *p)
     // Record and skip '# type: ignore' comments
     while (type == TYPE_IGNORE) {
         Py_ssize_t len = new_token.end_col_offset - new_token.col_offset;
-        char *tag = PyMem_Malloc(len + 1);
+        char *tag = PyMem_Malloc((size_t)len + 1);
         if (tag == NULL) {
             PyErr_NoMemory();
             goto error;
         }
-        strncpy(tag, new_token.start, len);
+        strncpy(tag, new_token.start, (size_t)len);
         tag[len] = '\0';
         // Ownership of tag passes to the growable array
         if (!growable_comment_array_add(&p->type_ignore_comments, p->tok->lineno, tag)) {
@@ -505,7 +505,7 @@ _PyPegen_get_last_nonnwhitespace_token(Parser *p)
 PyObject *
 _PyPegen_new_identifier(Parser *p, const char *n)
 {
-    PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL);
+    PyObject *id = PyUnicode_DecodeUTF8(n, (Py_ssize_t)strlen(n), NULL);
     if (!id) {
         goto error;
     }
@@ -601,7 +601,7 @@ expr_ty _PyPegen_soft_keyword_token(Parser *p) {
     Py_ssize_t size;
     PyBytes_AsStringAndSize(t->bytes, &the_token, &size);
     for (char **keyword = p->soft_keywords; *keyword != NULL; keyword++) {
-        if (strncmp(*keyword, the_token, size) == 0) {
+        if (strncmp(*keyword, the_token, (size_t)size) == 0) {
             return _PyPegen_name_from_token(p, t);
         }
     }
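
The two remaining hunks drop the Parser/pegen.c entries from what appear to be per-file warning-count baselines (7 tolerated warnings listed on one platform, 8 on the other), recording that the file now builds cleanly with -Wconversion: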

@@ -173,7 +173,6 @@ Parser/action_helpers.c 4
 Parser/lexer/buffer.c 1
 Parser/lexer/lexer.c 12
 Parser/parser.c 116
-Parser/pegen.c 7
 Parser/string_parser.c 7
 Parser/tokenizer/file_tokenizer.c 8
 Parser/tokenizer/helpers.c 7

@@ -199,7 +199,6 @@ Parser/action_helpers.c 3
 Parser/lexer/buffer.c 1
 Parser/lexer/lexer.c 14
 Parser/parser.c 116
-Parser/pegen.c 8
 Parser/string_parser.c 7
 Parser/tokenizer/file_tokenizer.c 9
 Parser/tokenizer/helpers.c 7