mirror of https://github.com/python/cpython
Christian Tismer's "trashcan" patch:
Added wrapping macros to dictobject.c, listobject.c, tupleobject.c, frameobject.c, and traceback.c that safely prevent core dumps on stack overflow. The supporting macros and functions live in object.c and object.h. The method is an "elevator destructor" that turns cascading deletes into tail-recursive behavior once a nesting limit is hit.
This commit is contained in:
parent 96a45adf80
commit d724b23420
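Not part of the commit, but for orientation: a minimal sketch of how a deallocator opts in to the scheme. MyObject, mytype_dealloc, and my_clear_members are hypothetical names; the macros are the ones added to object.h in the first hunk below.

static void
mytype_dealloc(op)
    MyObject *op;                       /* hypothetical container type */
{
    Py_TRASHCAN_SAFE_BEGIN(op)
    /* Runs only while _PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL;
       the DECREFs below may re-enter other deallocators. */
    my_clear_members(op);               /* hypothetical: Py_DECREF contained objects */
    PyMem_DEL(op);
    /* If the nesting limit was reached, the body above is skipped and op
       is parked on _PyTrash_delete_later instead; the outermost caller
       drains that list after the C stack has unwound. */
    Py_TRASHCAN_SAFE_END(op)
}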
object.h:

@@ -514,6 +514,53 @@ it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at
 times.
 */

+/*
+  trashcan
+  CT 2k0130
+  non-recursively destroy nested objects
+
+  CT 2k0223
+  redefinition for better locality and less overhead.
+
+  Objects that want to be recursion safe need to use
+  the macros
+        Py_TRASHCAN_SAFE_BEGIN(name)
+  and
+        Py_TRASHCAN_SAFE_END(name)
+  surrounding their actual deallocation code.
+
+  It would be nice to do this using the thread state.
+  Also, we could do an exact stack measure then.
+  Unfortunately, deallocations also take place when
+  the thread state is undefined.
+*/
+
+#define PyTrash_UNWIND_LEVEL 50
+
+#define Py_TRASHCAN_SAFE_BEGIN(op) \
+    { \
+        ++_PyTrash_delete_nesting; \
+        if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
+
+#define Py_TRASHCAN_SAFE_END(op) \
+        ;} \
+        else \
+            _PyTrash_deposit_object((PyObject*)op);\
+        --_PyTrash_delete_nesting; \
+        if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0) \
+            _PyTrash_destroy_list(); \
+    } \
+
+extern DL_IMPORT(void) _PyTrash_deposit_object Py_PROTO((PyObject*));
+extern DL_IMPORT(void) _PyTrash_destroy_list Py_PROTO(());
+
+extern DL_IMPORT(int) _PyTrash_delete_nesting;
+extern DL_IMPORT(PyObject *) _PyTrash_delete_later;
+
+/* swap the "xx" to check the speed loss */
+
+#define xxPy_TRASHCAN_SAFE_BEGIN(op)
+#define xxPy_TRASHCAN_SAFE_END(op) ;
 #ifdef __cplusplus
 }
 #endif
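Because Py_TRASHCAN_SAFE_BEGIN opens a block and an `if` that Py_TRASHCAN_SAFE_END completes with the matching `else`, the pairing is easier to see in expanded form. The sketch below is simply the textual expansion of the two macros around a placeholder body:

{                                               /* Py_TRASHCAN_SAFE_BEGIN(op) */
    ++_PyTrash_delete_nesting;
    if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) {
        /* ...the type's normal deallocation code... */
    ;}                                          /* Py_TRASHCAN_SAFE_END(op) */
    else
        _PyTrash_deposit_object((PyObject*)op); /* too deep: defer this object */
    --_PyTrash_delete_nesting;
    if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0)
        _PyTrash_destroy_list();                /* back at level 0: drain deferred objects */
}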
dictobject.c:

@@ -479,6 +479,7 @@ dict_dealloc(mp)
 {
     register int i;
     register dictentry *ep;
+    Py_TRASHCAN_SAFE_BEGIN(mp)
     for (i = 0, ep = mp->ma_table; i < mp->ma_size; i++, ep++) {
         if (ep->me_key != NULL) {
             Py_DECREF(ep->me_key);
@@ -489,6 +490,7 @@ dict_dealloc(mp)
     }
     PyMem_XDEL(mp->ma_table);
     PyMem_DEL(mp);
+    Py_TRASHCAN_SAFE_END(mp)
 }

 static int
frameobject.c:

@@ -103,6 +103,7 @@ frame_dealloc(f)
     int i;
     PyObject **fastlocals;

+    Py_TRASHCAN_SAFE_BEGIN(f)
     /* Kill all local variables */
     fastlocals = f->f_localsplus;
     for (i = f->f_nlocals; --i >= 0; ++fastlocals) {
@@ -120,6 +121,7 @@ frame_dealloc(f)
     Py_XDECREF(f->f_exc_traceback);
     f->f_back = free_list;
     free_list = f;
+    Py_TRASHCAN_SAFE_END(f)
 }

 PyTypeObject PyFrame_Type = {
listobject.c:

@@ -215,6 +215,7 @@ list_dealloc(op)
     PyListObject *op;
 {
     int i;
+    Py_TRASHCAN_SAFE_BEGIN(op)
     if (op->ob_item != NULL) {
         /* Do it backwards, for Christian Tismer.
            There's a simple test case where somehow this reduces
@@ -227,6 +228,7 @@ list_dealloc(op)
         free((ANY *)op->ob_item);
     }
     free((ANY *)op);
+    Py_TRASHCAN_SAFE_END(op)
 }

 static int
object.c:

@@ -906,3 +906,48 @@ Py_ReprLeave(obj)
         }
     }
 }
+
+/*
+  trashcan
+  CT 2k0130
+  non-recursively destroy nested objects
+
+  CT 2k0223
+  everything is now done in a macro.
+
+  CT 2k0305
+  modified to use functions, after Tim Peters' suggestion.
+
+  CT 2k0309
+  modified to restore a possible error.
+*/
+
+int _PyTrash_delete_nesting = 0;
+PyObject * _PyTrash_delete_later = NULL;
+
+void
+_PyTrash_deposit_object(op)
+    PyObject *op;
+{
+    PyObject *error_type, *error_value, *error_traceback;
+    PyErr_Fetch(&error_type, &error_value, &error_traceback);
+
+    if (!_PyTrash_delete_later)
+        _PyTrash_delete_later = PyList_New(0);
+    if (_PyTrash_delete_later)
+        PyList_Append(_PyTrash_delete_later, (PyObject *)op);
+
+    PyErr_Restore(error_type, error_value, error_traceback);
+}
+
+void
+_PyTrash_destroy_list()
+{
+    while (_PyTrash_delete_later) {
+        PyObject *shredder = _PyTrash_delete_later;
+        _PyTrash_delete_later = NULL;
+        ++_PyTrash_delete_nesting;
+        Py_DECREF(shredder);
+        --_PyTrash_delete_nesting;
+    }
+}
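The two functions above, together with the macros, bound the C stack no matter how deeply objects are nested. Below is a small self-contained model of the same idea in plain C (no Python API; node, node_destroy, UNWIND_LEVEL and the intrusive pending stack are made-up stand-ins for illustration): destroying a chain 100,000 levels deep never recurses past UNWIND_LEVEL frames, because deeper nodes are deferred and freed iteratively once the stack has unwound.

#include <stdio.h>
#include <stdlib.h>

#define UNWIND_LEVEL 50              /* mirrors PyTrash_UNWIND_LEVEL */

typedef struct node {
    struct node *child;              /* the "contained object" */
    struct node *next_pending;       /* link for the deferred-destruction list */
} node;

static int   nesting   = 0;          /* mirrors _PyTrash_delete_nesting */
static node *pending   = NULL;       /* mirrors _PyTrash_delete_later */
static int   max_depth = 0;          /* deepest recursion actually reached */

static void node_destroy(node *n);

/* mirrors _PyTrash_destroy_list(): drain deferred nodes iteratively */
static void drain_pending(void)
{
    while (pending != NULL) {
        node *n = pending;
        pending = n->next_pending;
        ++nesting;
        node_destroy(n);             /* runs again with a fresh recursion budget */
        --nesting;
    }
}

/* mirrors a deallocator wrapped in Py_TRASHCAN_SAFE_BEGIN/END */
static void node_destroy(node *n)
{
    ++nesting;
    if (nesting > max_depth)
        max_depth = nesting;
    if (nesting < UNWIND_LEVEL) {
        if (n->child != NULL)
            node_destroy(n->child);  /* the cascading delete */
        free(n);
    }
    else {                           /* too deep: defer, like _PyTrash_deposit_object */
        n->next_pending = pending;
        pending = n;
    }
    --nesting;
    if (pending != NULL && nesting <= 0)
        drain_pending();             /* only the outermost call gets here */
}

int main(void)
{
    node *head = NULL;
    int i;
    for (i = 0; i < 100000; i++) {   /* build a chain nested 100,000 levels deep */
        node *n = (node *)malloc(sizeof(node));
        n->child = head;
        n->next_pending = NULL;
        head = n;
    }
    node_destroy(head);
    printf("max recursion depth observed: %d\n", max_depth);  /* stays at UNWIND_LEVEL */
    return 0;
}

The real patch defers onto a Python list and lets the Py_DECREF of that list re-run the deallocators; the intrusive pending stack here is only the simplest equivalent of that bookkeeping.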
tupleobject.c:

@@ -172,6 +172,7 @@ tupledealloc(op)
 {
     register int i;

+    Py_TRASHCAN_SAFE_BEGIN(op)
     if (op->ob_size > 0) {
         i = op->ob_size;
         while (--i >= 0)
@@ -180,11 +181,13 @@ tupledealloc(op)
         if (op->ob_size < MAXSAVESIZE) {
             op->ob_item[0] = (PyObject *) free_tuples[op->ob_size];
             free_tuples[op->ob_size] = op;
-            return;
+            goto done; /* return */
         }
 #endif
     }
     free((ANY *)op);
+done:
+    Py_TRASHCAN_SAFE_END(op)
 }

 static int
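Note the `return;` to `goto done;` change above: an early return from inside the trashcan block would skip Py_TRASHCAN_SAFE_END, leaving _PyTrash_delete_nesting permanently raised and any deferred objects stranded. Any deallocator with an early exit has to jump to a label just before the END macro instead; roughly (keep_on_free_list and stash_on_free_list are hypothetical helpers):

    Py_TRASHCAN_SAFE_BEGIN(op)
    /* ...release contained objects... */
    if (keep_on_free_list(op)) {         /* hypothetical early-exit condition */
        stash_on_free_list(op);          /* hypothetical helper */
        goto done;                       /* not "return;" -- END must still run */
    }
    free((ANY *)op);
done:
    Py_TRASHCAN_SAFE_END(op)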
traceback.c:

@@ -68,9 +68,11 @@ static void
 tb_dealloc(tb)
     tracebackobject *tb;
 {
+    Py_TRASHCAN_SAFE_BEGIN(tb)
     Py_XDECREF(tb->tb_next);
     Py_XDECREF(tb->tb_frame);
     PyMem_DEL(tb);
+    Py_TRASHCAN_SAFE_END(tb)
 }

 #define Tracebacktype PyTraceBack_Type