bpo-46753: Add the empty tuple to the _PyRuntimeState.global_objects. (gh-31345)

https://bugs.python.org/issue46753
Author: Eric Snow, 2022-02-28 15:15:48 -07:00 (committed by GitHub)
commit 08deed1af5 (parent d5b7bba43b)
8 changed files with 218 additions and 204 deletions

View File

@@ -20,6 +20,7 @@ typedef struct {
 } PyGC_Head;
 
 #define _Py_AS_GC(o) ((PyGC_Head *)(o)-1)
+#define _PyGC_Head_UNUSED PyGC_Head
 
 /* True if the object is currently tracked by the GC. */
 #define _PyObject_GC_IS_TRACKED(o) (_Py_AS_GC(o)->_gc_next != 0)
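The added `_PyGC_Head_UNUSED` alias lets a statically allocated object reserve the slot where `_Py_AS_GC()` expects a GC header, even though the object is never tracked. A minimal sketch of the layout this enables, mirroring the pycore_global_objects.h change later in this commit (the wrapper struct and its name are illustrative, and Py_BUILD_CORE is normally set by CPython's build system rather than by hand):

#define Py_BUILD_CORE 1        /* normally defined by CPython's build, not by hand */
#include "Python.h"
#include "pycore_gc.h"         // _PyGC_Head_UNUSED, _Py_AS_GC()

/* _Py_AS_GC(o) computes ((PyGC_Head *)(o) - 1), so an untracked static
   object still needs PyGC_Head-sized padding directly in front of it. */
static struct {
    _PyGC_Head_UNUSED _gc_head_not_used;   /* reserved, never touched by the GC */
    PyTupleObject tuple_empty;             /* the statically allocated object */
} example_static_singleton;                /* hypothetical container name */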

View File

@@ -8,6 +8,7 @@ extern "C" {
 #  error "this header requires Py_BUILD_CORE define"
 #endif
 
+#include "pycore_gc.h"              // PyGC_Head
 #include "pycore_global_strings.h"  // struct _Py_global_strings
 
@@ -40,6 +41,9 @@ struct _Py_global_objects {
         } bytes_characters[256];
 
         struct _Py_global_strings strings;
+
+        _PyGC_Head_UNUSED _tuple_empty_gc_not_used;
+        PyTupleObject tuple_empty;
     } singletons;
 };
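This makes the empty tuple a field of the per-runtime singletons struct. The rest of the commit reaches it through `_Py_SINGLETON(tuple_empty)`; a sketch of that access path follows, with the macro definitions recalled from pycore_global_objects.h of this era rather than taken from this diff, so treat them as assumptions:

/* Assumed definitions (not shown in this diff):
 *   #define _Py_GLOBAL_OBJECT(NAME)  _PyRuntime.global_objects.NAME
 *   #define _Py_SINGLETON(NAME)      _Py_GLOBAL_OBJECT(singletons.NAME)
 * Under those definitions, the expression used by tupleobject.c below: */
PyObject *empty = (PyObject *)&_Py_SINGLETON(tuple_empty);
/* ...resolves to roughly:
 *   (PyObject *)&_PyRuntime.global_objects.singletons.tuple_empty */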

View File

@@ -964,6 +964,10 @@ extern "C" {
             INIT_ID(zipimporter), \
         }, \
     }, \
+    \
+    .tuple_empty = { \
+        .ob_base = _PyVarObject_IMMORTAL_INIT(&PyTuple_Type, 0) \
+    }, \
 }, \
 }
 /* End auto-generated code */
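`_PyVarObject_IMMORTAL_INIT` is defined elsewhere in pycore_runtime_init.h and is not part of this diff; its job here is to produce a compile-time-initialized, never-deallocated zero-length tuple. A standalone sketch of the same idea (the variable name and the refcount value are illustrative assumptions, not the macro's actual expansion):

#include <Python.h>

/* Illustrative only: a statically initialized, effectively immortal empty
   tuple. The real singleton is produced by the initializer above via
   _PyVarObject_IMMORTAL_INIT(&PyTuple_Type, 0). */
static PyTupleObject illustrative_tuple_empty = {
    .ob_base = {
        .ob_base = { .ob_refcnt = 999999999, .ob_type = &PyTuple_Type },
        .ob_size = 0,
    },
};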

View File

@@ -20,30 +20,45 @@ extern void _PyTuple_Fini(PyInterpreterState *);
 
 /* other API */
 
-#ifndef WITH_FREELISTS
-// without freelists
-// for tuples only store empty tuple singleton
-#  define PyTuple_MAXSAVESIZE 1
-#  define PyTuple_MAXFREELIST 1
-#endif
+// PyTuple_MAXSAVESIZE - largest tuple to save on free list
+// PyTuple_MAXFREELIST - maximum number of tuples of each size to save
 
-/* Speed optimization to avoid frequent malloc/free of small tuples */
-#ifndef PyTuple_MAXSAVESIZE
-   // Largest tuple to save on free list
-#  define PyTuple_MAXSAVESIZE 20
-#endif
-#ifndef PyTuple_MAXFREELIST
-   // Maximum number of tuples of each size to save
-#  define PyTuple_MAXFREELIST 2000
+#if defined(PyTuple_MAXSAVESIZE) && PyTuple_MAXSAVESIZE <= 0
+   // A build indicated that tuple freelists should not be used.
+#  define PyTuple_NFREELISTS 0
+#  undef PyTuple_MAXSAVESIZE
+#  undef PyTuple_MAXFREELIST
+
+#elif !defined(WITH_FREELISTS)
+#  define PyTuple_NFREELISTS 0
+#  undef PyTuple_MAXSAVESIZE
+#  undef PyTuple_MAXFREELIST
+
+#else
+   // We are using a freelist for tuples.
+#  ifndef PyTuple_MAXSAVESIZE
+#    define PyTuple_MAXSAVESIZE 20
+#  endif
+#  define PyTuple_NFREELISTS PyTuple_MAXSAVESIZE
+#  ifndef PyTuple_MAXFREELIST
+#    define PyTuple_MAXFREELIST 2000
+#  endif
 #endif
 
 struct _Py_tuple_state {
-#if PyTuple_MAXSAVESIZE > 0
-    /* Entries 1 up to PyTuple_MAXSAVESIZE are free lists,
-       entry 0 is the empty tuple () of which at most one instance
-       will be allocated. */
-    PyTupleObject *free_list[PyTuple_MAXSAVESIZE];
-    int numfree[PyTuple_MAXSAVESIZE];
+#if PyTuple_NFREELISTS > 0
+    /* There is one freelist for each size from 1 to PyTuple_MAXSAVESIZE.
+       The empty tuple is handled separately.
+
+       Each tuple stored in the array is the head of the linked list
+       (and the next available tuple) for that size. The actual tuple
+       object is used as the linked list node, with its first item
+       (ob_item[0]) pointing to the next node (i.e. the previous head).
+       Each linked list is initially NULL. */
+    PyTupleObject *free_list[PyTuple_NFREELISTS];
+    int numfree[PyTuple_NFREELISTS];
+#else
+    char _unused;  // Empty structs are not allowed.
 #endif
 };
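Net effect of the header change: `PyTuple_NFREELISTS` is the number of per-size freelists (0 when freelists are disabled), and the arrays are now indexed by size minus one because the empty tuple no longer occupies slot 0. A small sketch of the mapping used by the new tupleobject.c code (the helper name is hypothetical):

#include <assert.h>
/* Assumes the pycore_tuple.h definitions above are in scope. */

static inline Py_ssize_t
tuple_freelist_index(Py_ssize_t size)   /* hypothetical helper */
{
    /* Sizes 1..PyTuple_NFREELISTS map onto free_list[0..PyTuple_NFREELISTS-1];
       size 0 is served by the static singleton and never hits a freelist. */
    assert(size >= 1 && size <= PyTuple_NFREELISTS);
    return size - 1;
}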

View File

@@ -7,7 +7,6 @@
 #include "pycore_initconfig.h"   // _PyStatus_OK()
 #include "pycore_object.h"       // _PyObject_GC_TRACK()
 #include "pycore_pyerrors.h"     // _Py_FatalRefcountError()
-#include "pycore_tuple.h"        // struct _Py_tuple_state()
 
 /*[clinic input]
 class tuple "PyTupleObject *" "&PyTuple_Type"
@@ -17,32 +16,10 @@ class tuple "PyTupleObject *" "&PyTuple_Type"
 
 #include "clinic/tupleobject.c.h"
 
-#if PyTuple_MAXSAVESIZE > 0
-static struct _Py_tuple_state *
-get_tuple_state(void)
-{
-    PyInterpreterState *interp = _PyInterpreterState_GET();
-    return &interp->tuple;
-}
-#endif
-
-
-/* Print summary info about the state of the optimized allocator */
-void
-_PyTuple_DebugMallocStats(FILE *out)
-{
-#if PyTuple_MAXSAVESIZE > 0
-    struct _Py_tuple_state *state = get_tuple_state();
-    for (int i = 1; i < PyTuple_MAXSAVESIZE; i++) {
-        char buf[128];
-        PyOS_snprintf(buf, sizeof(buf),
-                      "free %d-sized PyTupleObject", i);
-        _PyDebugAllocatorStats(out, buf, state->numfree[i],
-                               _PyObject_VAR_SIZE(&PyTuple_Type, i));
-    }
-#endif
-}
+static inline PyTupleObject * maybe_freelist_pop(Py_ssize_t);
+static inline int maybe_freelist_push(PyTupleObject *);
 
 
 /* Allocate an uninitialized tuple object. Before making it public, following
    steps must be done:
@@ -56,38 +33,16 @@ _PyTuple_DebugMallocStats(FILE *out)
 static PyTupleObject *
 tuple_alloc(Py_ssize_t size)
 {
-    PyTupleObject *op;
-#if PyTuple_MAXSAVESIZE > 0
-    // If Python is built with the empty tuple singleton,
-    // tuple_alloc(0) must not be called.
-    assert(size != 0);
-#endif
     if (size < 0) {
         PyErr_BadInternalCall();
         return NULL;
     }
-    // Check for max save size > 1. Empty tuple singleton is special case.
-#if PyTuple_MAXSAVESIZE > 1
-    struct _Py_tuple_state *state = get_tuple_state();
 #ifdef Py_DEBUG
-    // tuple_alloc() must not be called after _PyTuple_Fini()
-    assert(state->numfree[0] != -1);
+    assert(size != 0);    // The empty tuple is statically allocated.
 #endif
-    if (size < PyTuple_MAXSAVESIZE && (op = state->free_list[size]) != NULL) {
-        assert(size != 0);
-        state->free_list[size] = (PyTupleObject *) op->ob_item[0];
-        state->numfree[size]--;
-        /* Inlined _PyObject_InitVar() without _PyType_HasFeature() test */
-#ifdef Py_TRACE_REFS
-        Py_SET_SIZE(op, size);
-        Py_SET_TYPE(op, &PyTuple_Type);
-#endif
-        _Py_NewReference((PyObject *)op);
-    }
-    else
-#endif
-    {
+
+    PyTupleObject *op = maybe_freelist_pop(size);
+    if (op == NULL) {
         /* Check for overflow */
         if ((size_t)size > ((size_t)PY_SSIZE_T_MAX - (sizeof(PyTupleObject) -
                     sizeof(PyObject *))) / sizeof(PyObject *)) {
@@ -100,58 +55,24 @@ tuple_alloc(Py_ssize_t size)
     return op;
 }
 
-static int
-tuple_create_empty_tuple_singleton(struct _Py_tuple_state *state)
-{
-#if PyTuple_MAXSAVESIZE > 0
-    assert(state->free_list[0] == NULL);
-
-    PyTupleObject *op = PyObject_GC_NewVar(PyTupleObject, &PyTuple_Type, 0);
-    if (op == NULL) {
-        return -1;
-    }
-    // The empty tuple singleton is not tracked by the GC.
-    // It does not contain any Python object.
-
-    state->free_list[0] = op;
-    state->numfree[0]++;
-    assert(state->numfree[0] == 1);
-#endif
-    return 0;
-}
+// The empty tuple singleton is not tracked by the GC.
+// It does not contain any Python object.
+// Note that tuple subclasses have their own empty instances.
 
-static PyObject *
+static inline PyObject *
 tuple_get_empty(void)
 {
-#if PyTuple_MAXSAVESIZE > 0
-    struct _Py_tuple_state *state = get_tuple_state();
-    PyTupleObject *op = state->free_list[0];
-    // tuple_get_empty() must not be called before _PyTuple_Init()
-    // or after _PyTuple_Fini()
-    assert(op != NULL);
-#ifdef Py_DEBUG
-    assert(state->numfree[0] != -1);
-#endif
-    Py_INCREF(op);
-    return (PyObject *) op;
-#else
-    return PyTuple_New(0);
-#endif
+    Py_INCREF(&_Py_SINGLETON(tuple_empty));
+    return (PyObject *)&_Py_SINGLETON(tuple_empty);
 }
 
 PyObject *
 PyTuple_New(Py_ssize_t size)
 {
     PyTupleObject *op;
-#if PyTuple_MAXSAVESIZE > 0
     if (size == 0) {
         return tuple_get_empty();
     }
-#endif
     op = tuple_alloc(size);
     if (op == NULL) {
         return NULL;
@@ -265,47 +186,33 @@ PyTuple_Pack(Py_ssize_t n, ...)
 static void
 tupledealloc(PyTupleObject *op)
 {
-    Py_ssize_t len = Py_SIZE(op);
+    if (Py_SIZE(op) == 0) {
+        /* The empty tuple is statically allocated. */
+        if (op == &_Py_SINGLETON(tuple_empty)) {
+#ifdef Py_DEBUG
+            _Py_FatalRefcountError("deallocating the empty tuple singleton");
+#else
+            return;
+#endif
+        }
+#ifdef Py_DEBUG
+        /* tuple subclasses have their own empty instances. */
+        assert(!PyTuple_CheckExact(op));
+#endif
+    }
+
     PyObject_GC_UnTrack(op);
     Py_TRASHCAN_BEGIN(op, tupledealloc)
-    if (len > 0) {
-        Py_ssize_t i = len;
-        while (--i >= 0) {
-            Py_XDECREF(op->ob_item[i]);
-        }
-#if PyTuple_MAXSAVESIZE > 0
-        struct _Py_tuple_state *state = get_tuple_state();
-#ifdef Py_DEBUG
-        // tupledealloc() must not be called after _PyTuple_Fini()
-        assert(state->numfree[0] != -1);
-#endif
-        if (len < PyTuple_MAXSAVESIZE
-            && state->numfree[len] < PyTuple_MAXFREELIST
-            && Py_IS_TYPE(op, &PyTuple_Type))
-        {
-            op->ob_item[0] = (PyObject *) state->free_list[len];
-            state->numfree[len]++;
-            state->free_list[len] = op;
-            goto done; /* return */
-        }
-#endif
-    }
-#if defined(Py_DEBUG) && PyTuple_MAXSAVESIZE > 0
-    else {
-        assert(len == 0);
-        struct _Py_tuple_state *state = get_tuple_state();
-        // The empty tuple singleton must only be deallocated by
-        // _PyTuple_Fini(): not before, not after
-        if (op == state->free_list[0] && state->numfree[0] != 0) {
-            _Py_FatalRefcountError("deallocating the empty tuple singleton");
-        }
-    }
-#endif
-    Py_TYPE(op)->tp_free((PyObject *)op);
 
-#if PyTuple_MAXSAVESIZE > 0
-done:
-#endif
+    Py_ssize_t i = Py_SIZE(op);
+    while (--i >= 0) {
+        Py_XDECREF(op->ob_item[i]);
+    }
+    // This will abort on the empty singleton (if there is one).
+    if (!maybe_freelist_push(op)) {
+        Py_TYPE(op)->tp_free((PyObject *)op);
+    }
+
     Py_TRASHCAN_END
 }
@@ -838,6 +745,7 @@ tuple_subtype_new(PyTypeObject *type, PyObject *iterable)
     if (tmp == NULL)
         return NULL;
     assert(PyTuple_Check(tmp));
+    /* This may allocate an empty tuple that is not the global one. */
     newobj = type->tp_alloc(type, n = PyTuple_GET_SIZE(tmp));
     if (newobj == NULL) {
         Py_DECREF(tmp);
@@ -1020,14 +928,22 @@ _PyTuple_Resize(PyObject **pv, Py_ssize_t newsize)
         PyErr_BadInternalCall();
         return -1;
     }
-    oldsize = Py_SIZE(v);
-    if (oldsize == newsize)
-        return 0;
+
+    oldsize = Py_SIZE(v);
+    if (oldsize == newsize) {
+        return 0;
+    }
+    if (newsize == 0) {
+        Py_DECREF(v);
+        *pv = tuple_get_empty();
+        return 0;
+    }
     if (oldsize == 0) {
-        /* Empty tuples are often shared, so we should never
-           resize them in-place even if we do own the only
-           (current) reference */
+#ifdef Py_DEBUG
+        assert(v == &_Py_SINGLETON(tuple_empty));
+#endif
+        /* The empty tuple is statically allocated so we never
+           resize it in-place. */
         Py_DECREF(v);
         *pv = PyTuple_New(newsize);
         return *pv == NULL ? -1 : 0;
@@ -1063,36 +979,6 @@ _PyTuple_Resize(PyObject **pv, Py_ssize_t newsize)
     return 0;
 }
 
-void
-_PyTuple_ClearFreeList(PyInterpreterState *interp)
-{
-#if PyTuple_MAXSAVESIZE > 0
-    struct _Py_tuple_state *state = &interp->tuple;
-    for (Py_ssize_t i = 1; i < PyTuple_MAXSAVESIZE; i++) {
-        PyTupleObject *p = state->free_list[i];
-        state->free_list[i] = NULL;
-        state->numfree[i] = 0;
-        while (p) {
-            PyTupleObject *q = p;
-            p = (PyTupleObject *)(p->ob_item[0]);
-            PyObject_GC_Del(q);
-        }
-    }
-    // the empty tuple singleton is only cleared by _PyTuple_Fini()
-#endif
-}
-
-PyStatus
-_PyTuple_InitGlobalObjects(PyInterpreterState *interp)
-{
-    struct _Py_tuple_state *state = &interp->tuple;
-    if (tuple_create_empty_tuple_singleton(state) < 0) {
-        return _PyStatus_NO_MEMORY();
-    }
-    return _PyStatus_OK();
-}
-
 
 PyStatus
 _PyTuple_InitTypes(PyInterpreterState *interp)
@@ -1112,24 +998,18 @@ _PyTuple_InitTypes(PyInterpreterState *interp)
     return _PyStatus_OK();
 }
 
+static void maybe_freelist_clear(PyInterpreterState *, int);
+
 void
 _PyTuple_Fini(PyInterpreterState *interp)
 {
-#if PyTuple_MAXSAVESIZE > 0
-    struct _Py_tuple_state *state = &interp->tuple;
-    // The empty tuple singleton must not be tracked by the GC
-    assert(!_PyObject_GC_IS_TRACKED(state->free_list[0]));
-#ifdef Py_DEBUG
-    state->numfree[0] = 0;
-#endif
-    Py_CLEAR(state->free_list[0]);
-#ifdef Py_DEBUG
-    state->numfree[0] = -1;
-#endif
-    _PyTuple_ClearFreeList(interp);
-#endif
+    maybe_freelist_clear(interp, 1);
+}
+
+void
+_PyTuple_ClearFreeList(PyInterpreterState *interp)
+{
+    maybe_freelist_clear(interp, 0);
 }
 
 
 /*********************** Tuple Iterator **************************/
@@ -1277,3 +1157,113 @@ tuple_iter(PyObject *seq)
     _PyObject_GC_TRACK(it);
     return (PyObject *)it;
 }
+
+
+/*************
+ * freelists *
+ *************/
+
+#define STATE (interp->tuple)
+#define FREELIST_FINALIZED (STATE.numfree[0] < 0)
+
+static inline PyTupleObject *
+maybe_freelist_pop(Py_ssize_t size)
+{
+#if PyTuple_NFREELISTS > 0
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+#ifdef Py_DEBUG
+    /* maybe_freelist_pop() must not be called after maybe_freelist_fini(). */
+    assert(!FREELIST_FINALIZED);
+#endif
+    if (size == 0) {
+        return NULL;
+    }
+    assert(size > 0);
+    if (size < PyTuple_MAXSAVESIZE) {
+        Py_ssize_t index = size - 1;
+        PyTupleObject *op = STATE.free_list[index];
+        if (op != NULL) {
+            /* op is the head of a linked list, with the first item
+               pointing to the next node. Here we pop off the old head. */
+            STATE.free_list[index] = (PyTupleObject *) op->ob_item[0];
+            STATE.numfree[index]--;
+            /* Inlined _PyObject_InitVar() without _PyType_HasFeature() test */
+#ifdef Py_TRACE_REFS
+            /* maybe_freelist_push() ensures these were already set. */
+            // XXX Can we drop these? See commit 68055ce6fe01 (GvR, Dec 1998).
+            Py_SET_SIZE(op, size);
+            Py_SET_TYPE(op, &PyTuple_Type);
+#endif
+            _Py_NewReference((PyObject *)op);
+            /* END inlined _PyObject_InitVar() */
+            return op;
+        }
+    }
+#endif
+    return NULL;
+}
+
+static inline int
+maybe_freelist_push(PyTupleObject *op)
+{
+#if PyTuple_NFREELISTS > 0
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+#ifdef Py_DEBUG
+    /* maybe_freelist_push() must not be called after maybe_freelist_fini(). */
+    assert(!FREELIST_FINALIZED);
+#endif
+    if (Py_SIZE(op) == 0) {
+        return 0;
+    }
+    Py_ssize_t index = Py_SIZE(op) - 1;
+    if (index < PyTuple_NFREELISTS
+        && STATE.numfree[index] < PyTuple_MAXFREELIST
+        && Py_IS_TYPE(op, &PyTuple_Type))
+    {
+        /* op is the head of a linked list, with the first item
+           pointing to the next node. Here we set op as the new head. */
+        op->ob_item[0] = (PyObject *) STATE.free_list[index];
+        STATE.free_list[index] = op;
+        STATE.numfree[index]++;
+        return 1;
+    }
+#endif
+    return 0;
+}
+
+static void
+maybe_freelist_clear(PyInterpreterState *interp, int fini)
+{
+#if PyTuple_NFREELISTS > 0
+    for (Py_ssize_t i = 0; i < PyTuple_NFREELISTS; i++) {
+        PyTupleObject *p = STATE.free_list[i];
+        STATE.free_list[i] = NULL;
+        STATE.numfree[i] = fini ? -1 : 0;
+        while (p) {
+            PyTupleObject *q = p;
+            p = (PyTupleObject *)(p->ob_item[0]);
+            PyObject_GC_Del(q);
+        }
+    }
+#endif
+}
+
+/* Print summary info about the state of the optimized allocator */
+void
+_PyTuple_DebugMallocStats(FILE *out)
+{
+#if PyTuple_NFREELISTS > 0
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+    for (int i = 0; i < PyTuple_NFREELISTS; i++) {
+        int len = i + 1;
+        char buf[128];
+        PyOS_snprintf(buf, sizeof(buf),
+                      "free %d-sized PyTupleObject", len);
+        _PyDebugAllocatorStats(out, buf, STATE.numfree[i],
+                               _PyObject_VAR_SIZE(&PyTuple_Type, len));
+    }
+#endif
+}
+
+#undef STATE
+#undef FREELIST_FINALIZED
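The new `maybe_freelist_push()` / `maybe_freelist_pop()` pair threads a singly linked list through `ob_item[0]` of the cached tuples, one list per size class. A self-contained sketch of that intrusive-freelist pattern in plain C (all names here are illustrative, not CPython's):

#include <stddef.h>

/* Each cached node reuses its first payload slot as the "next" pointer,
   just as the code above reuses ob_item[0]. */
typedef struct node {
    void *slot0;            /* payload while live, next pointer while cached */
} node;

static node *free_head = NULL;
static int numfree = 0;

static void
freelist_push(node *n)
{
    n->slot0 = free_head;   /* the old head becomes this node's next */
    free_head = n;
    numfree++;
}

static node *
freelist_pop(void)
{
    node *n = free_head;
    if (n != NULL) {
        free_head = (node *)n->slot0;   /* advance to the next cached node */
        numfree--;
    }
    return n;
}

maybe_freelist_clear() walks each such list with PyObject_GC_Del(), and setting numfree[i] to -1 at finalization is what the FREELIST_FINALIZED assertions detect.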

View File

@@ -682,11 +682,6 @@ pycore_init_global_objects(PyInterpreterState *interp)
 
     _PyUnicode_InitState(interp);
 
-    status = _PyTuple_InitGlobalObjects(interp);
-    if (_PyStatus_EXCEPTION(status)) {
-        return status;
-    }
-
     return _PyStatus_OK();
 }

View File

@@ -287,6 +287,8 @@ class Printer:
         return f"& {name}.ob_base"
 
     def generate_tuple(self, name: str, t: Tuple[object, ...]) -> str:
+        if len(t) == 0:
+            return f"(PyObject *)& _Py_SINGLETON(tuple_empty)"
         items = [self.generate(f"{name}_{i}", it) for i, it in enumerate(t)]
         self.write("static")
         with self.indent():

View File

@@ -252,6 +252,9 @@ def generate_runtime_init(identifiers, strings):
         for name in sorted(identifiers):
             assert name.isidentifier(), name
             printer.write(f'INIT_ID({name}),')
+        printer.write('')
+        with printer.block('.tuple_empty =', ','):
+            printer.write('.ob_base = _PyVarObject_IMMORTAL_INIT(&PyTuple_Type, 0)')
     printer.write(END)
     printer.write(after)
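These writer calls presumably emit the same initializer that this commit adds to pycore_runtime_init.h above, which keeps regenerated output in sync with the hand-edited header; the generated block should look roughly like:

    .tuple_empty = { \
        .ob_base = _PyVarObject_IMMORTAL_INIT(&PyTuple_Type, 0) \
    }, \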