Years in the making.

objimpl.h, pymem.h:  Stop mapping PyMem_{Del, DEL} and PyMem_{Free, FREE}
to PyObject_{Free, FREE} in a release build.  They're aliases for the
system free() now.
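
For illustration, the effective release-build mapping after this change
(WITH_PYMALLOC defined, PYMALLOC_DEBUG not defined) is roughly the
following; this paraphrases objimpl.h and pymem.h rather than quoting
them:

    #include <stdlib.h>

    /* PyMem_ family: thin wrappers around the platform allocator. */
    #define PyMem_MALLOC(n)         malloc((n) ? (n) : 1)
    #define PyMem_REALLOC(p, n)     realloc((p), (n) ? (n) : 1)
    #define PyMem_FREE              free        /* no longer PyObject_FREE */

    /* The ancient spellings are plain aliases now, nothing more. */
    #define PyMem_Del               PyMem_Free
    #define PyMem_DEL               PyMem_FREE

    /* PyObject_ family: still routed through obmalloc. */
    #define PyObject_MALLOC         PyObject_Malloc
    #define PyObject_REALLOC        PyObject_Realloc
    #define PyObject_FREE           PyObject_Free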

_subprocess.c/sp_handle_dealloc():  Since the memory was originally
obtained via PyObject_NEW, it must be released via PyObject_FREE (or
_DEL).
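
As a sketch of the required pairing (made-up names, loosely modeled on
sp_handle_dealloc; this is not code from the patch):

    #include "Python.h"

    typedef struct {
        PyObject_HEAD
        long handle;
    } my_handle_object;                 /* hypothetical type */

    static PyTypeObject MyHandle_Type;  /* assume it's initialized elsewhere */

    static my_handle_object *
    my_handle_new(long h)
    {
        /* The memory comes from the object allocator (obmalloc). */
        my_handle_object *self = PyObject_NEW(my_handle_object, &MyHandle_Type);
        if (self != NULL)
            self->handle = h;
        return self;
    }

    static void
    my_handle_dealloc(my_handle_object *self)
    {
        /* Wrong now:  PyMem_DEL(self) -- that's the system free(), and it
           must never be handed a pointer obtained from obmalloc. */
        PyObject_FREE(self);            /* or PyObject_Del(self) */
    }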

pythonrun.c, tokenizer.c, parsermodule.c:  I lost count of the number of
PyObject vs PyMem mismatches in these -- it's like the specific
function called at each site was picked at random, sometimes even with
memory obtained via PyMem getting released via PyObject.  Changed most
sites to use the PyObject family uniformly, since the blobs allocated
are predictably small in most cases, and obmalloc is generally faster
than the system malloc for small blocks.
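
For example (hypothetical helpers, not code from this patch), the style
the front end was converted to looks like this:

    #include "Python.h"
    #include <string.h>

    static char *
    copy_small_string(const char *s, size_t len)
    {
        /* Before:  some call sites did PyMem_MALLOC here and PyObject_FREE
           (or the reverse) at the release site -- a mismatch only the debug
           allocator happened to tolerate. */
        char *blob = (char *)PyObject_MALLOC(len + 1);
        if (blob != NULL) {
            memcpy(blob, s, len);
            blob[len] = '\0';
        }
        return blob;
    }

    static void
    free_small_string(char *blob)
    {
        /* Released via the same family it was obtained from. */
        PyObject_FREE(blob);
    }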

If extension modules in real life prove as sloppy as Python's front
end, we'll have to revert the objimpl.h + pymem.h part of this patch.
Note that no problems will show up in a debug build (all calls still go
through obmalloc there).  Problems will show up only in a release build,
most likely as segfaults.
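
Roughly why the debug build masks the mismatch (paraphrasing pymem.h,
assuming WITH_PYMALLOC):

    #ifdef PYMALLOC_DEBUG
    /* Debug build:  both families funnel into the debug wrappers around
       obmalloc, so even a mismatched PyObject_NEW + PyMem_DEL pair
       "works". */
    #define PyMem_MALLOC    PyObject_MALLOC   /* -> _PyObject_DebugMalloc */
    #define PyMem_FREE      PyObject_FREE     /* -> _PyObject_DebugFree */
    #else
    /* Release build:  PyMem_FREE is the platform free(), and handing it a
       pointer that came from obmalloc is undefined behavior. */
    #define PyMem_FREE      free
    #endif
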
Tim Peters 2006-03-26 23:27:58 +00:00
parent 1c168d8eeb
commit c9d78aa470
7 changed files with 72 additions and 63 deletions

Include/objimpl.h

@ -101,7 +101,7 @@ PyAPI_FUNC(void) PyObject_Free(void *);
/* Macros */
#ifdef WITH_PYMALLOC
#ifdef PYMALLOC_DEBUG
#ifdef PYMALLOC_DEBUG /* WITH_PYMALLOC && PYMALLOC_DEBUG */
PyAPI_FUNC(void *) _PyObject_DebugMalloc(size_t nbytes);
PyAPI_FUNC(void *) _PyObject_DebugRealloc(void *p, size_t nbytes);
PyAPI_FUNC(void) _PyObject_DebugFree(void *p);
@ -124,11 +124,7 @@ PyAPI_FUNC(void) _PyObject_DebugMallocStats(void);
#else /* ! WITH_PYMALLOC */
#define PyObject_MALLOC PyMem_MALLOC
#define PyObject_REALLOC PyMem_REALLOC
/* This is an odd one! For backward compatibility with old extensions, the
PyMem "release memory" functions have to invoke the object allocator's
free() function. When pymalloc isn't enabled, that leaves us using
the platform free(). */
#define PyObject_FREE free
#define PyObject_FREE PyMem_FREE
#endif /* WITH_PYMALLOC */

Include/pymem.h

@ -59,6 +59,7 @@ PyAPI_FUNC(void) PyMem_Free(void *);
/* Redirect all memory operations to Python's debugging allocator. */
#define PyMem_MALLOC PyObject_MALLOC
#define PyMem_REALLOC PyObject_REALLOC
#define PyMem_FREE PyObject_FREE
#else /* ! PYMALLOC_DEBUG */
@ -68,14 +69,10 @@ PyAPI_FUNC(void) PyMem_Free(void *);
pymalloc. To solve these problems, allocate an extra byte. */
#define PyMem_MALLOC(n) malloc((n) ? (n) : 1)
#define PyMem_REALLOC(p, n) realloc((p), (n) ? (n) : 1)
#define PyMem_FREE free
#endif /* PYMALLOC_DEBUG */
/* In order to avoid breaking old code mixing PyObject_{New, NEW} with
PyMem_{Del, DEL} and PyMem_{Free, FREE}, the PyMem "release memory"
functions have to be redirected to the object deallocator. */
#define PyMem_FREE PyObject_FREE
/*
* Type-oriented memory interface
* ==============================
@ -95,11 +92,11 @@ PyAPI_FUNC(void) PyMem_Free(void *);
#define PyMem_RESIZE(p, type, n) \
( (p) = (type *) PyMem_REALLOC((p), (n) * sizeof(type)) )
/* In order to avoid breaking old code mixing PyObject_{New, NEW} with
PyMem_{Del, DEL} and PyMem_{Free, FREE}, the PyMem "release memory"
functions have to be redirected to the object deallocator. */
#define PyMem_Del PyObject_Free
#define PyMem_DEL PyObject_FREE
/* PyMem{Del,DEL} are left over from ancient days, and shouldn't be used
* anymore. They're just confusing aliases for PyMem_{Free,FREE} now.
*/
#define PyMem_Del PyMem_Free
#define PyMem_DEL PyMem_FREE
#ifdef __cplusplus
}

Misc/NEWS

@ -311,7 +311,7 @@ Extension Modules
- Everything under lib-old was removed. This includes the following modules:
Para, addpack, cmp, cmpcache, codehack, dircmp, dump, find, fmt, grep,
lockfile, newdir, ni, packmail, poly, rand, statcache, tb, tzparse,
util, whatsound, whrandom, zmod
- The following modules were removed: regsub, reconvert, regex, regex_syntax.
@ -942,6 +942,17 @@ Build
C API
-----
- ``PyMem_{Del, DEL}`` and ``PyMem_{Free, FREE}`` no longer map to
``PyObject_{Free, FREE}``. They map to the system ``free()`` now. If memory
is obtained via the ``PyObject_`` family, it must be released via the
``PyObject_`` family, and likewise for the ``PyMem_`` family. This has
always been officially true, but when Python's small-object allocator was
introduced, an attempt was made to cater to a few extension modules
discovered at the time that obtained memory via ``PyObject_New`` but
released it via ``PyMem_DEL``. It's years later, and if such code still
exists it will fail now (probably with segfaults, but calling wrong
low-level memory management functions can yield many symptoms).
- Added a C API for set and frozenset objects.
- Removed PyRange_New().
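
A hypothetical extension-module snippet illustrating the pairing rule the
PyMem/PyObject entry above describes (the helper name is invented, not
CPython code):

    #include "Python.h"

    static int
    demo_pairing(void)
    {
        /* Raw memory:  obtained from the PyMem_ family, returned to it.
           PyMem_Del/PyMem_DEL are now just aliases for PyMem_Free/free(). */
        double *raw = PyMem_New(double, 16);
        if (raw == NULL)
            return -1;
        PyMem_Del(raw);                 /* not PyObject_Free(raw) */

        /* Object memory:  obtained from the PyObject_ family, returned
           to it. */
        char *blob = (char *)PyObject_Malloc(64);
        if (blob == NULL)
            return -1;
        PyObject_Free(blob);            /* not PyMem_Del(blob) or free(blob) */
        return 0;
    }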

Modules/parsermodule.c

@ -701,7 +701,7 @@ build_node_children(PyObject *tuple, node *root, int *line_num)
}
}
len = PyString_GET_SIZE(temp) + 1;
strn = (char *)PyMem_MALLOC(len);
strn = (char *)PyObject_MALLOC(len);
if (strn != NULL)
(void) memcpy(strn, PyString_AS_STRING(temp), len);
Py_DECREF(temp);
@ -719,11 +719,11 @@ build_node_children(PyObject *tuple, node *root, int *line_num)
}
err = PyNode_AddChild(root, type, strn, *line_num, 0);
if (err == E_NOMEM) {
PyMem_DEL(strn);
PyObject_FREE(strn);
return (node *) PyErr_NoMemory();
}
if (err == E_OVERFLOW) {
PyMem_DEL(strn);
PyObject_FREE(strn);
PyErr_SetString(PyExc_ValueError,
"unsupported number of child nodes");
return NULL;
@ -742,7 +742,7 @@ build_node_children(PyObject *tuple, node *root, int *line_num)
}
Py_XDECREF(elem);
}
return (root);
return root;
}
@ -787,7 +787,7 @@ build_node_tree(PyObject *tuple)
if (res && encoding) {
Py_ssize_t len;
len = PyString_GET_SIZE(encoding) + 1;
res->n_str = (char *)PyMem_MALLOC(len);
res->n_str = (char *)PyObject_MALLOC(len);
if (res->n_str != NULL)
(void) memcpy(res->n_str, PyString_AS_STRING(encoding), len);
Py_DECREF(encoding);

PC/_subprocess.c

@ -104,7 +104,7 @@ sp_handle_dealloc(sp_handle_object* self)
{
if (self->handle != INVALID_HANDLE_VALUE)
CloseHandle(self->handle);
PyMem_DEL(self);
PyObject_FREE(self);
}
static PyMethodDef sp_handle_methods[] = {

Parser/tokenizer.c

@ -163,7 +163,7 @@ error_ret(struct tok_state *tok) /* XXX */
{
tok->decoding_erred = 1;
if (tok->fp != NULL && tok->buf != NULL) /* see PyTokenizer_Free */
PyMem_DEL(tok->buf);
PyObject_FREE(tok->buf);
tok->buf = NULL;
return NULL; /* as if it were EOF */
}
@ -171,7 +171,7 @@ error_ret(struct tok_state *tok) /* XXX */
static char *
new_string(const char *s, Py_ssize_t len)
{
char* result = PyMem_NEW(char, len + 1);
char* result = (char *)PyObject_MALLOC(len + 1);
if (result != NULL) {
memcpy(result, s, len);
result[len] = '\0';
@ -236,7 +236,7 @@ get_coding_spec(const char *s, Py_ssize_t size)
char* r = new_string(begin, t - begin);
char* q = get_normal_name(r);
if (r != q) {
PyMem_DEL(r);
PyObject_FREE(r);
r = new_string(q, strlen(q));
}
return r;
@ -277,18 +277,18 @@ check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
tok->decoding_state = -1;
}
else
PyMem_DEL(cs);
PyObject_FREE(cs);
#else
/* Without Unicode support, we cannot
process the coding spec. Since there
won't be any Unicode literals, that
won't matter. */
PyMem_DEL(cs);
PyObject_FREE(cs);
#endif
}
} else { /* then, compare cs with BOM */
r = (strcmp(tok->encoding, cs) == 0);
PyMem_DEL(cs);
PyObject_FREE(cs);
}
}
if (!r) {
@ -334,7 +334,7 @@ check_bom(int get_char(struct tok_state *),
return 1;
}
if (tok->encoding != NULL)
PyMem_DEL(tok->encoding);
PyObject_FREE(tok->encoding);
tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */
return 1;
NON_BOM:
@ -345,7 +345,7 @@ check_bom(int get_char(struct tok_state *),
/* Read a line of text from TOK into S, using the stream in TOK.
Return NULL on failure, else S.
On entry, tok->decoding_buffer will be one of:
1) NULL: need to call tok->decoding_readline to get a new line
2) PyUnicodeObject *: decoding_feof has called tok->decoding_readline and
@ -354,7 +354,7 @@ check_bom(int get_char(struct tok_state *),
(in the s buffer) to copy entire contents of the line read
by tok->decoding_readline. tok->decoding_buffer has the overflow.
In this case, fp_readl is called in a loop (with an expanded buffer)
until the buffer ends with a '\n' (or until the end of the file is
reached): see tok_nextc and its calls to decoding_fgets.
*/
@ -470,7 +470,7 @@ decoding_fgets(char *s, int size, struct tok_state *tok)
break;
} else if (tok->decoding_state > 0) {
/* We want a 'raw' read. */
line = Py_UniversalNewlineFgets(s, size,
tok->fp, NULL);
break;
} else {
@ -502,11 +502,11 @@ decoding_fgets(char *s, int size, struct tok_state *tok)
char buf[500];
/* Need to add 1 to the line number, since this line
has not been counted, yet. */
sprintf(buf,
"Non-ASCII character '\\x%.2x' "
"in file %.200s on line %i, "
"but no encoding declared; "
"see http://www.python.org/peps/pep-0263.html for details",
"see http://www.python.org/peps/pep-0263.html for details",
badchar, tok->filename, tok->lineno + 1);
PyErr_SetString(PyExc_SyntaxError, buf);
return error_ret(tok);
@ -537,13 +537,15 @@ decoding_feof(struct tok_state *tok)
/* Fetch a byte from TOK, using the string buffer. */
static int buf_getc(struct tok_state *tok) {
static int
buf_getc(struct tok_state *tok) {
return Py_CHARMASK(*tok->str++);
}
/* Unfetch a byte from TOK, using the string buffer. */
static void buf_ungetc(int c, struct tok_state *tok) {
static void
buf_ungetc(int c, struct tok_state *tok) {
tok->str--;
assert(Py_CHARMASK(*tok->str) == c); /* tok->cur may point to read-only segment */
}
@ -551,7 +553,8 @@ static void buf_ungetc(int c, struct tok_state *tok) {
/* Set the readline function for TOK to ENC. For the string-based
tokenizer, this means to just record the encoding. */
static int buf_setreadl(struct tok_state *tok, const char* enc) {
static int
buf_setreadl(struct tok_state *tok, const char* enc) {
tok->enc = enc;
return 1;
}
@ -653,7 +656,7 @@ PyTokenizer_FromFile(FILE *fp, char *ps1, char *ps2)
struct tok_state *tok = tok_new();
if (tok == NULL)
return NULL;
if ((tok->buf = PyMem_NEW(char, BUFSIZ)) == NULL) {
if ((tok->buf = (char *)PyObject_MALLOC(BUFSIZ)) == NULL) {
PyTokenizer_Free(tok);
return NULL;
}
@ -672,14 +675,14 @@ void
PyTokenizer_Free(struct tok_state *tok)
{
if (tok->encoding != NULL)
PyMem_DEL(tok->encoding);
PyObject_FREE(tok->encoding);
#ifndef PGEN
Py_XDECREF(tok->decoding_readline);
Py_XDECREF(tok->decoding_buffer);
#endif
if (tok->fp != NULL && tok->buf != NULL)
PyMem_DEL(tok->buf);
PyMem_DEL(tok);
PyObject_FREE(tok->buf);
PyMem_FREE(tok);
}
#if !defined(PGEN) && defined(Py_USING_UNICODE)
@ -721,7 +724,7 @@ tok_stdin_decode(struct tok_state *tok, char **inp)
PyMem_FREE(*inp);
*inp = converted;
if (tok->encoding != NULL)
PyMem_DEL(tok->encoding);
PyObject_FREE(tok->encoding);
tok->encoding = new_string(encoding, strlen(encoding));
if (tok->encoding == NULL)
goto error_nomem;
@ -790,10 +793,10 @@ tok_nextc(register struct tok_state *tok)
size_t oldlen = tok->cur - tok->buf;
size_t newlen = oldlen + strlen(new);
char *buf = tok->buf;
PyMem_RESIZE(buf, char, newlen+1);
buf = (char *)PyObject_REALLOC(buf, newlen+1);
tok->lineno++;
if (buf == NULL) {
PyMem_DEL(tok->buf);
PyObject_FREE(tok->buf);
tok->buf = NULL;
PyMem_FREE(new);
tok->done = E_NOMEM;
@ -811,7 +814,7 @@ tok_nextc(register struct tok_state *tok)
else {
tok->lineno++;
if (tok->buf != NULL)
PyMem_DEL(tok->buf);
PyObject_FREE(tok->buf);
tok->buf = new;
tok->line_start = tok->buf;
tok->cur = tok->buf;
@ -826,7 +829,8 @@ tok_nextc(register struct tok_state *tok)
char *pt;
if (tok->start == NULL) {
if (tok->buf == NULL) {
tok->buf = PyMem_NEW(char, BUFSIZ);
tok->buf = (char *)
PyObject_MALLOC(BUFSIZ);
if (tok->buf == NULL) {
tok->done = E_NOMEM;
return EOF;
@ -861,7 +865,8 @@ tok_nextc(register struct tok_state *tok)
Py_ssize_t curvalid = tok->inp - tok->buf;
Py_ssize_t newsize = curvalid + BUFSIZ;
char *newbuf = tok->buf;
PyMem_RESIZE(newbuf, char, newsize);
newbuf = (char *)PyObject_REALLOC(newbuf,
newsize);
if (newbuf == NULL) {
tok->done = E_NOMEM;
tok->cur = tok->inp;
@ -1184,9 +1189,9 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
}
}
}
tok->start = tok->cur;
/* Return pending indents/dedents */
if (tok->pendin != 0) {
if (tok->pendin < 0) {
@ -1198,17 +1203,17 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
return INDENT;
}
}
again:
tok->start = NULL;
/* Skip spaces */
do {
c = tok_nextc(tok);
} while (c == ' ' || c == '\t' || c == '\014');
/* Set start of current token */
tok->start = tok->cur - 1;
/* Skip comment, while looking for tab-setting magic */
if (c == '#') {
static char *tabforms[] = {
@ -1226,7 +1231,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
} while (c != EOF && c != '\n' &&
tp - cbuf + 1 < sizeof(cbuf));
*tp = '\0';
for (cp = tabforms;
cp < tabforms + sizeof(tabforms)/sizeof(tabforms[0]);
cp++) {
if ((tp = strstr(cbuf, *cp))) {
@ -1244,12 +1249,12 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
while (c != EOF && c != '\n')
c = tok_nextc(tok);
}
/* Check for EOF and errors now */
if (c == EOF) {
return tok->done == E_EOF ? ENDMARKER : ERRORTOKEN;
}
/* Identifier (most frequent token!) */
if (isalpha(c) || c == '_') {
/* Process r"", u"" and ur"" */
@ -1277,7 +1282,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
*p_end = tok->cur;
return NAME;
}
/* Newline */
if (c == '\n') {
tok->atbol = 1;
@ -1288,7 +1293,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
tok->cont_line = 0;
return NEWLINE;
}
/* Period or number starting with period? */
if (c == '.') {
c = tok_nextc(tok);
@ -1451,7 +1456,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
*p_end = tok->cur;
return STRING;
}
/* Line continuation */
if (c == '\\') {
c = tok_nextc(tok);
@ -1463,7 +1468,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
tok->cont_line = 1;
goto again; /* Read next line */
}
/* Check for two-character token */
{
int c2 = tok_nextc(tok);
@ -1482,7 +1487,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
}
tok_backup(tok, c2);
}
/* Keep track of parentheses nesting level */
switch (c) {
case '(':
@ -1496,7 +1501,7 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
tok->level--;
break;
}
/* Punctuation character */
*p_start = tok->start;
*p_end = tok->cur;

Python/pythonrun.c

@ -1457,7 +1457,7 @@ err_input(perrdetail *err)
v = Py_BuildValue("(ziiz)", err->filename,
err->lineno, err->offset, err->text);
if (err->text != NULL) {
PyMem_DEL(err->text);
PyObject_FREE(err->text);
err->text = NULL;
}
w = NULL;