Patch by Charles G Waldman to avoid a sneaky memory leak in
_PyTuple_Resize().  In addition, a change suggested by Jeremy Hylton
to limit the size of the free lists is also merged into this patch.

Charles wrote initially:

"""
Test Case:  run the following code:

class Nothing:
    def __len__(self):
        return 5
    def __getitem__(self, i):
        if i < 3:
            return i
        else:
            raise IndexError, i

def g(a,*b,**c):
    return

for x in xrange(1000000):
    g(*Nothing())


and watch Python's memory use go up and up.


Diagnosis:

The analysis begins with the call to PySequence_Tuple at line 1641 in
ceval.c - the argument to g is seen to be a sequence but not a tuple,
so it needs to be converted from an abstract sequence to a concrete
tuple.  PySequence_Tuple starts off by creating a new tuple of length
5 (line 1122 in abstract.c).  Then at line 1149, since only 3 elements
were assigned, _PyTuple_Resize is called to make the 5-tuple into a
3-tuple.  When we're all done the 3-tuple is decrefed, but rather than
being freed it is placed on the free_tuples cache.

The basic problem is that the 3-tuples are being added to the cache
but never picked up again, since _PyTuple_Resize doesn't make use of
the free_tuples cache.  If you are resizing a 5-tuple to a 3-tuple and
there is already a 3-tuple in free_tuples[3], instead of using this
tuple, _PyTuple_Resize will realloc the 5-tuple to a 3-tuple.  It
would be more efficient to use the existing 3-tuple and cache the
5-tuple.

By making _PyTuple_Resize aware of the free_tuples (just as
PyTuple_New), we not only save a few calls to realloc, but also
prevent this misbehavior whereby tuples are being added to the
free_tuples list but never properly "recycled".
"""

And later:

"""
This patch replaces my submission of Sun, 16 Apr and addresses Jeremy
Hylton's suggestions that we also limit the size of the free tuple
list.  I chose 2000 as the maximum number of tuples of any particular
size to save.

There was also a problem with the previous version of this patch
causing a core dump if Python was built with Py_TRACE_REFS.  This is
fixed in the below version of the patch, which uses tupledealloc
instead of _Py_Dealloc.
"""
Author: Guido van Rossum
Date:   2000-04-21 21:15:05 +00:00
Commit: 5ce78f8e4e (parent 84219682fb)
1 changed file with 59 additions and 16 deletions

Objects/tupleobject.c

@@ -33,15 +33,20 @@ PERFORMANCE OF THIS SOFTWARE.
 #include "Python.h"
 /* Speed optimization to avoid frequent malloc/free of small tuples */
 #ifndef MAXSAVESIZE
-#define MAXSAVESIZE     20
+#define MAXSAVESIZE     20  /* Largest tuple to save on free list */
 #endif
+#ifndef MAXSAVEDTUPLES
+#define MAXSAVEDTUPLES  2000  /* Maximum number of tuples of each size to save */
+#endif
 #if MAXSAVESIZE > 0
-/* Entries 1 upto MAXSAVESIZE are free lists, entry 0 is the empty
+/* Entries 1 up to MAXSAVESIZE are free lists, entry 0 is the empty
    tuple () of which at most one instance will be allocated.
 */
 static PyTupleObject *free_tuples[MAXSAVESIZE];
+static int num_free_tuples[MAXSAVESIZE];
 #endif
 #ifdef COUNT_ALLOCS
 int fast_tuple_allocs;
@@ -71,6 +76,7 @@ PyTuple_New(size)
             (op = free_tuples[size]) != NULL)
         {
                 free_tuples[size] = (PyTupleObject *) op->ob_item[0];
+                num_free_tuples[size]--;
 #ifdef COUNT_ALLOCS
                 fast_tuple_allocs++;
 #endif
@@ -104,6 +110,7 @@ PyTuple_New(size)
 #if MAXSAVESIZE > 0
         if (size == 0) {
                 free_tuples[0] = op;
+                ++num_free_tuples[0];
                 Py_INCREF(op);  /* extra INCREF so that this is never freed */
         }
 #endif
@@ -171,16 +178,17 @@ tupledealloc(op)
         register PyTupleObject *op;
 {
         register int i;
+        register int len = op->ob_size;
         Py_TRASHCAN_SAFE_BEGIN(op)
-        if (op->ob_size > 0) {
-                i = op->ob_size;
+        if (len > 0) {
+                i = len;
                 while (--i >= 0)
                         Py_XDECREF(op->ob_item[i]);
 #if MAXSAVESIZE > 0
-                if (op->ob_size < MAXSAVESIZE) {
-                        op->ob_item[0] = (PyObject *) free_tuples[op->ob_size];
-                        free_tuples[op->ob_size] = op;
+                if (len < MAXSAVESIZE && num_free_tuples[len] < MAXSAVEDTUPLES) {
+                        op->ob_item[0] = (PyObject *) free_tuples[len];
+                        num_free_tuples[len]++;
+                        free_tuples[len] = op;
                         goto done; /* return */
                 }
 #endif
@@ -469,14 +477,49 @@ _PyTuple_Resize(pv, newsize, last_is_sticky)
                 Py_XDECREF(v->ob_item[i]);
                 v->ob_item[i] = NULL;
         }
-        sv = (PyTupleObject *)
-                realloc((char *)v,
-                        sizeof(PyTupleObject) + newsize * sizeof(PyObject *));
-        *pv = (PyObject *) sv;
-        if (sv == NULL) {
-                PyMem_DEL(v);
-                PyErr_NoMemory();
-                return -1;
+#if MAXSAVESIZE > 0
+        if (newsize == 0 && free_tuples[0]) {
+                num_free_tuples[0]--;
+                sv = free_tuples[0];
+                sv->ob_size = 0;
+                Py_INCREF(sv);
+#ifdef COUNT_ALLOCS
+                tuple_zero_allocs++;
+#endif
+                tupledealloc(v);
+                *pv = (PyObject*) sv;
+                return 0;
+        }
+        if (0 < newsize && newsize < MAXSAVESIZE &&
+            (sv = free_tuples[newsize]) != NULL)
+        {
+                free_tuples[newsize] = (PyTupleObject *) sv->ob_item[0];
+                num_free_tuples[newsize]--;
+#ifdef COUNT_ALLOCS
+                fast_tuple_allocs++;
+#endif
+#ifdef Py_TRACE_REFS
+                sv->ob_type = &PyTuple_Type;
+#endif
+                for (i = 0; i < newsize; ++i) {
+                        sv->ob_item[i] = v->ob_item[i];
+                        v->ob_item[i] = NULL;
+                }
+                sv->ob_size = v->ob_size;
+                tupledealloc(v);
+                *pv = (PyObject *) sv;
+        } else
+#endif
+        {
+                sv = (PyTupleObject *)
+                        realloc((char *)v,
+                                sizeof(PyTupleObject) + newsize * sizeof(PyObject *));
+                *pv = (PyObject *) sv;
+                if (sv == NULL) {
+                        PyMem_DEL(v);
+                        PyErr_NoMemory();
+                        return -1;
+                }
+        }
         _Py_NewReference((PyObject *)sv);
         for (i = sv->ob_size; i < newsize; i++)