gh-117376: Partial implementation of deferred reference counting (#117696)

This marks objects as using deferred reference counting via the
`ob_gc_bits` field in the free-threaded build and collects those objects
during GC.
Sam Gross 2024-04-12 13:36:20 -04:00 committed by GitHub
parent c50cb6dd09
commit 4ad8f090cc
9 changed files with 82 additions and 21 deletions

Include/internal/pycore_gc.h

@@ -39,12 +39,13 @@ static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) {
 /* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds) */
 #ifdef Py_GIL_DISABLED
-#  define _PyGC_BITS_TRACKED (1)
-#  define _PyGC_BITS_FINALIZED (2)
+#  define _PyGC_BITS_TRACKED (1) // Tracked by the GC
+#  define _PyGC_BITS_FINALIZED (2) // tp_finalize was called
 #  define _PyGC_BITS_UNREACHABLE (4)
 #  define _PyGC_BITS_FROZEN (8)
 #  define _PyGC_BITS_SHARED (16)
 #  define _PyGC_BITS_SHARED_INLINE (32)
+#  define _PyGC_BITS_DEFERRED (64) // Use deferred reference counting
 #endif
 
 /* True if the object is currently tracked by the GC. */
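All of these flags pack into the single per-object ob_gc_bits byte, so marking an object for deferred reference counting is one OR and checking it is one AND. A minimal standalone sketch of that flag arithmetic (the struct and macro names below are stand-ins, not CPython's real object layout):

    #include <stdio.h>

    #define BITS_TRACKED   (1)   // stand-in for _PyGC_BITS_TRACKED
    #define BITS_FINALIZED (2)   // stand-in for _PyGC_BITS_FINALIZED
    #define BITS_DEFERRED  (64)  // stand-in for _PyGC_BITS_DEFERRED

    typedef struct { unsigned char gc_bits; } fake_obj;  // one byte, like ob_gc_bits

    int main(void)
    {
        fake_obj op = { 0 };
        op.gc_bits |= BITS_TRACKED;    // what PyObject_GC_Track would set
        op.gc_bits |= BITS_DEFERRED;   // what _PyObject_SetDeferredRefcount sets
        printf("tracked=%d deferred=%d finalized=%d\n",
               (op.gc_bits & BITS_TRACKED) != 0,
               (op.gc_bits & BITS_DEFERRED) != 0,
               (op.gc_bits & BITS_FINALIZED) != 0);  // -> 1 1 0
        op.gc_bits &= ~BITS_DEFERRED;  // what disable_deferred_refcounting clears
        printf("deferred=%d\n", (op.gc_bits & BITS_DEFERRED) != 0);  // -> 0
        return 0;
    }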

Include/internal/pycore_object.h

@@ -158,6 +158,21 @@ static inline void _Py_ClearImmortal(PyObject *op)
         op = NULL; \
     } while (0)
 
+// Mark an object as supporting deferred reference counting. This is a no-op
+// in the default (with GIL) build. Objects that use deferred reference
+// counting should be tracked by the GC so that they are eventually collected.
+extern void _PyObject_SetDeferredRefcount(PyObject *op);
+
+static inline int
+_PyObject_HasDeferredRefcount(PyObject *op)
+{
+#ifdef Py_GIL_DISABLED
+    return (op->ob_gc_bits & _PyGC_BITS_DEFERRED) != 0;
+#else
+    return 0;
+#endif
+}
+
 #if !defined(Py_GIL_DISABLED)
 static inline void
 _Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
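Note the shape of _PyObject_HasDeferredRefcount: in the default build it returns a compile-time 0, so callers' if branches fold away entirely. A standalone sketch of the same pattern under assumed names, not the CPython definitions (compile with and without -DGIL_DISABLED to see both variants):

    #include <stdio.h>

    #define GC_BITS_DEFERRED 64  // stand-in for _PyGC_BITS_DEFERRED

    typedef struct { unsigned char gc_bits; } fake_obj;

    static inline int
    has_deferred(fake_obj *op)
    {
    #ifdef GIL_DISABLED          // stand-in for Py_GIL_DISABLED
        return (op->gc_bits & GC_BITS_DEFERRED) != 0;
    #else
        (void)op;
        return 0;                // compile-time constant; the branch folds away
    #endif
    }

    int main(void)
    {
        fake_obj op = { GC_BITS_DEFERRED };
        // Prints 1 when built with -DGIL_DISABLED, 0 otherwise.
        printf("deferred? %d\n", has_deferred(&op));
        return 0;
    }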

Lib/test/test_code.py

@@ -834,6 +834,7 @@ if check_impl_detail(cpython=True) and ctypes is not None:
             SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(100))
             del f
+            gc_collect()  # For free-threaded build
             self.assertEqual(LAST_FREED, 100)
 
     def test_get_set(self):
@@ -872,6 +873,7 @@ if check_impl_detail(cpython=True) and ctypes is not None:
             del f
             tt.start()
             tt.join()
+            gc_collect()  # For free-threaded build
             self.assertEqual(LAST_FREED, 500)

Objects/descrobject.c

@@ -909,6 +909,7 @@ descr_new(PyTypeObject *descrtype, PyTypeObject *type, const char *name)
     descr = (PyDescrObject *)PyType_GenericAlloc(descrtype, 0);
     if (descr != NULL) {
+        _PyObject_SetDeferredRefcount((PyObject *)descr);
         descr->d_type = (PyTypeObject*)Py_XNewRef(type);
         descr->d_name = PyUnicode_InternFromString(name);
         if (descr->d_name == NULL) {

Objects/funcobject.c

@@ -127,6 +127,9 @@ _PyFunction_FromConstructor(PyFrameConstructor *constr)
     op->func_typeparams = NULL;
     op->vectorcall = _PyFunction_Vectorcall;
     op->func_version = 0;
+    // NOTE: functions created via FrameConstructor do not use deferred
+    // reference counting because they are typically not part of cycles
+    // nor accessed by multiple threads.
     _PyObject_GC_TRACK(op);
     handle_func_event(PyFunction_EVENT_CREATE, op, NULL);
     return op;
@@ -202,6 +205,12 @@ PyFunction_NewWithQualName(PyObject *code, PyObject *globals, PyObject *qualname
     op->func_typeparams = NULL;
     op->vectorcall = _PyFunction_Vectorcall;
     op->func_version = 0;
+    if ((code_obj->co_flags & CO_NESTED) == 0) {
+        // Use deferred reference counting for top-level functions, but not
+        // nested functions because they are more likely to capture variables,
+        // which makes prompt deallocation more important.
+        _PyObject_SetDeferredRefcount((PyObject *)op);
+    }
     _PyObject_GC_TRACK(op);
     handle_func_event(PyFunction_EVENT_CREATE, op, NULL);
     return (PyObject *)op;
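The multi-thread angle deserves emphasis: without deferral, every call to a shared top-level function performs an atomic incref/decref on the same object from whatever thread is running it. A toy model of that cross-thread traffic, using plain pthreads rather than any CPython API (build with cc -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long refcnt = 1;  // stand-in for a shared function's refcount

    static void *
    worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
            atomic_fetch_add(&refcnt, 1);  // "incref" on call entry
            atomic_fetch_sub(&refcnt, 1);  // "decref" on call exit
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("final refcount: %ld\n", atomic_load(&refcnt));  // always 1
        return 0;
    }

The two atomic read-modify-writes per iteration bounce the object's cache line between cores; deferring them means a hot shared function is only reference-counted when the GC runs.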

Objects/moduleobject.c

@@ -88,21 +88,31 @@ new_module_notrack(PyTypeObject *mt)
     m->md_weaklist = NULL;
     m->md_name = NULL;
     m->md_dict = PyDict_New();
-    if (m->md_dict != NULL) {
-        return m;
-    }
-    Py_DECREF(m);
-    return NULL;
+    if (m->md_dict == NULL) {
+        Py_DECREF(m);
+        return NULL;
+    }
+    return m;
+}
+
+static void
+track_module(PyModuleObject *m)
+{
+    _PyObject_SetDeferredRefcount(m->md_dict);
+    PyObject_GC_Track(m->md_dict);
+
+    _PyObject_SetDeferredRefcount((PyObject *)m);
+    PyObject_GC_Track(m);
 }
 
 static PyObject *
 new_module(PyTypeObject *mt, PyObject *args, PyObject *kws)
 {
-    PyObject *m = (PyObject *)new_module_notrack(mt);
+    PyModuleObject *m = new_module_notrack(mt);
     if (m != NULL) {
-        PyObject_GC_Track(m);
+        track_module(m);
     }
-    return m;
+    return (PyObject *)m;
 }
 
 PyObject *
@@ -113,7 +123,7 @@ PyModule_NewObject(PyObject *name)
         return NULL;
     if (module_init_dict(m, m->md_dict, name, NULL) != 0)
         goto fail;
-    PyObject_GC_Track(m);
+    track_module(m);
     return (PyObject *)m;
 
  fail:
@@ -705,16 +715,7 @@ static int
 module___init___impl(PyModuleObject *self, PyObject *name, PyObject *doc)
 /*[clinic end generated code: output=e7e721c26ce7aad7 input=57f9e177401e5e1e]*/
 {
-    PyObject *dict = self->md_dict;
-    if (dict == NULL) {
-        dict = PyDict_New();
-        if (dict == NULL)
-            return -1;
-        self->md_dict = dict;
-    }
-    if (module_init_dict(self, dict, name, doc) < 0)
-        return -1;
-    return 0;
+    return module_init_dict(self, self->md_dict, name, doc);
 }
 
 static void
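Modules are natural candidates for deferral because their dicts almost always end up in reference cycles anyway: the dict holds the module's functions, and each function's __globals__ points back at the dict. Pure reference counting can never free such a pair, so the GC already has to collect it, and the deferred bias costs nothing extra. A toy illustration of that cycle (not CPython code):

    #include <stdio.h>

    typedef struct obj {
        int refcnt;
        struct obj *ref;  // a single outgoing reference, for simplicity
    } obj;

    int main(void)
    {
        obj dict = { .refcnt = 1, .ref = NULL };  // the 1: held by the module
        obj func = { .refcnt = 0, .ref = NULL };

        dict.ref = &func; func.refcnt++;  // dict["f"] -> function
        func.ref = &dict; dict.refcnt++;  // function.__globals__ -> dict

        dict.refcnt--;                    // the module goes away
        printf("dict.refcnt=%d func.refcnt=%d\n", dict.refcnt, func.refcnt);
        // Both stay at 1: only a tracing GC can reclaim this pair, so
        // skipping prompt refcounting on it loses nothing.
        return 0;
    }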

Objects/object.c

@@ -2424,6 +2424,19 @@ _Py_SetImmortal(PyObject *op)
     _Py_SetImmortalUntracked(op);
 }
 
+void
+_PyObject_SetDeferredRefcount(PyObject *op)
+{
+#ifdef Py_GIL_DISABLED
+    assert(PyType_IS_GC(Py_TYPE(op)));
+    assert(_Py_IsOwnedByCurrentThread(op));
+    assert(op->ob_ref_shared == 0);
+    op->ob_gc_bits |= _PyGC_BITS_DEFERRED;
+    op->ob_ref_local += 1;
+    op->ob_ref_shared = _Py_REF_QUEUED;
+#endif
+}
+
 void
 _Py_ResurrectReference(PyObject *op)
 {
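The ob_ref_local += 1 plus the _Py_REF_QUEUED tag act as a bias: one artificial reference that keeps a deferred object alive between GC passes. Only the sum of the local and shared counts matters, which is why disable_deferred_refcounting (below) can cancel the bias by subtracting one reference-unit from the shared field instead. A standalone model of the arithmetic, with assumed field names and flag values rather than CPython's real layout:

    #include <stdio.h>
    #include <stdint.h>

    #define REF_SHARED_SHIFT 2    // assumed: low 2 bits of ref_shared are flags
    #define REF_QUEUED       0x2  // stand-in for _Py_REF_QUEUED

    typedef struct {
        uint32_t ref_local;   // owning thread's count (like ob_ref_local)
        int64_t  ref_shared;  // other threads' count << 2, OR'd with flag bits
        int      deferred;    // stand-in for the _PyGC_BITS_DEFERRED bit
    } toy_object;

    // Mirrors _PyObject_SetDeferredRefcount: one extra local reference plus
    // the queued tag keep the object alive until a GC pass accounts for it.
    static void
    set_deferred(toy_object *op)
    {
        op->deferred = 1;
        op->ref_local += 1;
        op->ref_shared = REF_QUEUED;
    }

    // Mirrors disable_deferred_refcounting: removing one reference-unit from
    // the shared field cancels the local bias, since only the sum matters.
    static void
    clear_deferred(toy_object *op)
    {
        op->deferred = 0;
        op->ref_shared -= (1 << REF_SHARED_SHIFT);
    }

    // The refcount the GC reasons about: local + shared, minus the bias
    // (compare update_refs: refcount -= _PyObject_HasDeferredRefcount(op)).
    // Arithmetic right shift of a negative value is assumed here.
    static int64_t
    true_refcount(const toy_object *op)
    {
        return (int64_t)op->ref_local
               + (op->ref_shared >> REF_SHARED_SHIFT)
               - op->deferred;
    }

    int main(void)
    {
        toy_object fn = { .ref_local = 1, .ref_shared = 0, .deferred = 0 };
        set_deferred(&fn);
        printf("biased: local=%u true=%lld\n", fn.ref_local,
               (long long)true_refcount(&fn));   // local=2 true=1
        clear_deferred(&fn);
        printf("unbiased: true=%lld\n",
               (long long)true_refcount(&fn));   // still 1: the bias nets out
        return 0;
    }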

Objects/typeobject.c

@@ -3581,6 +3581,8 @@ type_new_alloc(type_new_ctx *ctx)
     et->ht_module = NULL;
     et->_ht_tpname = NULL;
 
+    _PyObject_SetDeferredRefcount((PyObject *)et);
+
     return type;
 }

Python/gc_free_threading.c

@@ -159,6 +159,15 @@ gc_decref(PyObject *op)
     op->ob_tid -= 1;
 }
 
+static void
+disable_deferred_refcounting(PyObject *op)
+{
+    if (_PyObject_HasDeferredRefcount(op)) {
+        op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+        op->ob_ref_shared -= (1 << _Py_REF_SHARED_SHIFT);
+    }
+}
+
 static Py_ssize_t
 merge_refcount(PyObject *op, Py_ssize_t extra)
 {
@@ -375,9 +384,10 @@ update_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
     }
 
     Py_ssize_t refcount = Py_REFCNT(op);
+    refcount -= _PyObject_HasDeferredRefcount(op);
     _PyObject_ASSERT(op, refcount >= 0);
 
-    if (refcount > 0) {
+    if (refcount > 0 && !_PyObject_HasDeferredRefcount(op)) {
         // Untrack tuples and dicts as necessary in this pass, but not objects
         // with zero refcount, which we will want to collect.
         if (PyTuple_CheckExact(op)) {
@@ -466,6 +476,9 @@ mark_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
         return true;
     }
 
+    _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(op) >= 0,
+                              "refcount is too small");
+
     if (gc_is_unreachable(op) && gc_get_refs(op) != 0) {
         // Object is reachable but currently marked as unreachable.
         // Mark it as reachable and traverse its pointers to find
@@ -499,6 +512,10 @@ scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
     struct collection_state *state = (struct collection_state *)args;
 
     if (gc_is_unreachable(op)) {
+        // Disable deferred refcounting for unreachable objects so that they
+        // are collected immediately after finalization.
+        disable_deferred_refcounting(op);
+
         // Merge and add one to the refcount to prevent deallocation while we
         // are holding on to it in a worklist.
         merge_refcount(op, 1);
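Putting the pieces together: update_refs debits each deferred object's refcount by one, so an object kept alive only by the bias starts the cycle-detection pass at zero; if tracing then leaves it unreachable, disable_deferred_refcounting strips the bias and finalization frees it immediately. A compressed standalone sketch of that accounting, with the tracing step omitted and all names assumed rather than taken from the real mi_heap visitors:

    #include <stdio.h>

    #define BITS_DEFERRED 64  // stand-in for _PyGC_BITS_DEFERRED

    typedef struct {
        const char *name;
        int refcnt;           // merged count, deferred bias included
        unsigned gc_bits;
    } toy;

    // Compare update_refs: refcount -= _PyObject_HasDeferredRefcount(op);
    static int
    gc_refs(const toy *t)
    {
        return t->refcnt - ((t->gc_bits & BITS_DEFERRED) != 0);
    }

    int main(void)
    {
        // A function kept alive only by its bias vs. a module some thread
        // still references; inter-object tracing is omitted in this toy.
        toy fn  = { "stale function", 1, BITS_DEFERRED };
        toy mod = { "live module",    2, BITS_DEFERRED };

        toy *heap[] = { &fn, &mod };
        for (int i = 0; i < 2; i++) {
            toy *t = heap[i];
            if (gc_refs(t) == 0) {
                // Compare disable_deferred_refcounting: strip the bias so
                // the object is freed as soon as the collector lets go.
                t->gc_bits &= ~BITS_DEFERRED;
                t->refcnt -= 1;
                printf("%s: unreachable, collect (refcnt now %d)\n",
                       t->name, t->refcnt);
            }
            else {
                printf("%s: %d external reference(s), keep\n",
                       t->name, gc_refs(t));
            }
        }
        return 0;
    }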