gh-121794: Don't set `ob_tid` to zero in fast-path dealloc (#121799)

We should maintain the invariant that a zero `ob_tid` implies the
refcount fields are merged.

* Move the assignment in `_Py_MergeZeroLocalRefcount` to immediately
  before the refcount merge.
* Update `_PyTrash_thread_destroy_chain` to set `ob_ref_shared` to
  `_Py_REF_MERGED` when setting `ob_tid` to zero.

Also check this invariant with assertions in the GC in debug builds.
That uncovered a bug when running out of memory during GC.
Sam Gross, 2024-07-15 17:50:10 -04:00, committed by GitHub
parent 82a4dac9f6, commit d23be3947c
3 changed files with 62 additions and 13 deletions
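
The invariant driving this change can be illustrated with a small standalone model. The struct below is a deliberate simplification of the free-threaded object header, not CPython's real layout, and the merged-flag value is an assumption mirroring `_Py_REF_MERGED` (the low two bits of `ob_ref_shared`); immortal objects are exempt, matching the new GC assertions.

#include <assert.h>
#include <stdint.h>

#define REF_MERGED 0x3   /* assumed: low two bits of ob_ref_shared */

typedef struct {
    uintptr_t ob_tid;        /* owning thread id; 0 once merged */
    uint32_t  ob_ref_local;  /* owning thread's refcount */
    intptr_t  ob_ref_shared; /* shared refcount, flags in low 2 bits */
    int       immortal;      /* stand-in for _Py_IsImmortal() */
} ObjModel;

/* The check the new debug-build GC assertions perform per object:
 * ob_tid == 0 if and only if the refcount fields are merged
 * (immortal objects aside). */
static void
check_tid_invariant(const ObjModel *op)
{
    if ((op->ob_ref_shared & 0x3) == REF_MERGED) {
        assert(op->ob_tid == 0);   /* merged => no owner tid */
    }
    else if (!op->immortal) {
        assert(op->ob_tid != 0);   /* unmerged, mortal => owner tid set */
    }
}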

Misc/NEWS.d entry (new file)

@@ -0,0 +1,2 @@
+Fix bug in free-threaded Python where a resurrected object could lead to
+a negative ref count assertion failure.

Objects/object.c

@@ -375,7 +375,6 @@ _Py_MergeZeroLocalRefcount(PyObject *op)
 {
     assert(op->ob_ref_local == 0);

-    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
     Py_ssize_t shared = _Py_atomic_load_ssize_acquire(&op->ob_ref_shared);
     if (shared == 0) {
         // Fast-path: shared refcount is zero (including flags)
@@ -383,6 +382,11 @@ _Py_MergeZeroLocalRefcount(PyObject *op)
         return;
     }

+    // gh-121794: This must be before the store to `ob_ref_shared` (gh-119999),
+    // but should be outside the fast-path to maintain the invariant that
+    // a zero `ob_tid` implies a merged refcount.
+    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
+
     // Slow-path: atomically set the flags (low two bits) to _Py_REF_MERGED.
     Py_ssize_t new_shared;
     do {
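
A sketch of the resulting control flow, using C11 atomics in place of CPython's `_Py_atomic_*` wrappers. This is a simplified model under the assumed flag value above, not the actual implementation; it omits the deallocation paths.

#include <stdatomic.h>
#include <stdint.h>

#define REF_MERGED 0x3  /* assumed flag value, as above */

typedef struct {
    _Atomic uintptr_t ob_tid;
    _Atomic intptr_t  ob_ref_shared;
} ObjAtomicModel;

static void
merge_zero_local_refcount(ObjAtomicModel *op)
{
    intptr_t shared = atomic_load_explicit(&op->ob_ref_shared,
                                           memory_order_acquire);
    if (shared == 0) {
        /* Fast path: no shared references or flags. The object is
         * deallocated immediately, so ob_tid is left untouched and the
         * invariant (ob_tid == 0 => merged) is never violated. */
        return;
    }

    /* Clear ob_tid before publishing the merged flag: once another
     * thread observes the merged refcount it may free the object, so
     * ob_tid must not be written after that point (gh-119999). */
    atomic_store_explicit(&op->ob_tid, 0, memory_order_relaxed);

    /* Slow path: atomically set the low two flag bits to MERGED. */
    intptr_t new_shared;
    do {
        new_shared = (shared & ~(intptr_t)0x3) | REF_MERGED;
    } while (!atomic_compare_exchange_weak_explicit(
                 &op->ob_ref_shared, &shared, new_shared,
                 memory_order_acq_rel, memory_order_acquire));
}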
@@ -2724,7 +2728,6 @@ _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op)
     _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
     _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
 #ifdef Py_GIL_DISABLED
-    _PyObject_ASSERT(op, op->ob_tid == 0);
     op->ob_tid = (uintptr_t)tstate->delete_later;
 #else
     _PyGCHead_SET_PREV(_Py_AS_GC(op), (PyGC_Head*)tstate->delete_later);
@@ -2757,6 +2760,7 @@ _PyTrash_thread_destroy_chain(PyThreadState *tstate)
 #ifdef Py_GIL_DISABLED
         tstate->delete_later = (PyObject*) op->ob_tid;
         op->ob_tid = 0;
+        _Py_atomic_store_ssize_relaxed(&op->ob_ref_shared, _Py_REF_MERGED);
 #else
         tstate->delete_later = (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
 #endif
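
For context, free-threaded builds link deferred-destruction ("trashcan") objects through `ob_tid` itself. The single-threaded model below is an assumed simplification of the `_PyTrash_*` logic, showing why popping an object must both zero `ob_tid` and mark the refcount merged before `tp_dealloc` runs.

#include <stdint.h>
#include <stddef.h>

#define REF_MERGED 0x3  /* assumed flag value, as above */

typedef struct TrashObj {
    uintptr_t ob_tid;        /* repurposed as the "next" link while queued */
    intptr_t  ob_ref_shared;
} TrashObj;

static TrashObj *delete_later;  /* per-thread list head in the real code */

/* Deposit: link the object into the deferred-destruction list. After
 * this commit, ob_tid may legitimately be nonzero on entry, so the old
 * assert(ob_tid == 0) had to go. */
static void
trash_deposit(TrashObj *op)
{
    op->ob_tid = (uintptr_t)delete_later;
    delete_later = op;
}

/* Destroy-chain step: unlink one object and restore its header before
 * tp_dealloc runs. Setting ob_ref_shared to MERGED re-establishes the
 * invariant that a zero ob_tid implies a merged refcount. */
static TrashObj *
trash_pop(void)
{
    TrashObj *op = delete_later;
    if (op != NULL) {
        delete_later = (TrashObj *)op->ob_tid;
        op->ob_tid = 0;
        op->ob_ref_shared = REF_MERGED;
    }
    return op;
}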

Python/gc_free_threading.c

@@ -455,6 +455,30 @@ mark_reachable(PyObject *op)
 }

 #ifdef GC_DEBUG
+static bool
+validate_refcounts(const mi_heap_t *heap, const mi_heap_area_t *area,
+                   void *block, size_t block_size, void *args)
+{
+    PyObject *op = op_from_block(block, args, false);
+    if (op == NULL) {
+        return true;
+    }
+
+    _PyObject_ASSERT_WITH_MSG(op, !gc_is_unreachable(op),
+                              "object should not be marked as unreachable yet");
+
+    if (_Py_REF_IS_MERGED(op->ob_ref_shared)) {
+        _PyObject_ASSERT_WITH_MSG(op, op->ob_tid == 0,
+                                  "merged objects should have ob_tid == 0");
+    }
+    else if (!_Py_IsImmortal(op)) {
+        _PyObject_ASSERT_WITH_MSG(op, op->ob_tid != 0,
+                                  "unmerged objects should have ob_tid != 0");
+    }
+
+    return true;
+}
+
 static bool
 validate_gc_objects(const mi_heap_t *heap, const mi_heap_area_t *area,
                     void *block, size_t block_size, void *args)
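
Both validators follow the callback shape that `gc_visit_heaps` expects: return `true` to keep walking, and treat a `NULL` result from `op_from_block` as a block to skip. As a usage illustration only (this visitor is hypothetical and not part of the commit), an extra debug pass could be written the same way:

// Hypothetical additional debug visitor: count how many live objects
// already have merged refcounts. Reuses the same internal helpers as
// validate_refcounts above.
static Py_ssize_t merged_count;

static bool
count_merged_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
                     void *block, size_t block_size, void *args)
{
    PyObject *op = op_from_block(block, args, false);
    if (op == NULL) {
        return true;            // not a GC-managed object; keep walking
    }
    if (_Py_REF_IS_MERGED(op->ob_ref_shared)) {
        merged_count++;         // hypothetical file-scope counter
    }
    return true;                // continue the heap walk
}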
@@ -498,6 +522,19 @@ mark_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
     return true;
 }

+static bool
+restore_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
+             void *block, size_t block_size, void *args)
+{
+    PyObject *op = op_from_block(block, args, false);
+    if (op == NULL) {
+        return true;
+    }
+    gc_restore_tid(op);
+    gc_clear_unreachable(op);
+    return true;
+}
+
 /* Return true if object has a pre-PEP 442 finalization method. */
 static int
 has_legacy_finalizer(PyObject *op)
@@ -549,6 +586,13 @@ static int
 deduce_unreachable_heap(PyInterpreterState *interp,
                         struct collection_state *state)
 {
+#ifdef GC_DEBUG
+    // Check the invariant that merged refcounts have a zero `ob_tid` and
+    // that no object is marked unreachable yet; the computed reference
+    // count difference will be stored in `ob_tid` during this pass.
+    gc_visit_heaps(interp, &validate_refcounts, &state->base);
+#endif
+
     // Identify objects that are directly reachable from outside the GC heap
     // by computing the difference between the refcount and the number of
     // incoming references.
@@ -563,6 +607,8 @@ deduce_unreachable_heap(PyInterpreterState *interp,

     // Transitively mark reachable objects by clearing the
     // _PyGC_BITS_UNREACHABLE flag.
     if (gc_visit_heaps(interp, &mark_heap_visitor, &state->base) < 0) {
+        // On out-of-memory, restore the refcounts and bail out.
+        gc_visit_heaps(interp, &restore_refs, &state->base);
         return -1;
     }
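
During this phase, `ob_tid` is borrowed to hold the computed reference count difference, so bailing out must undo that. Below is a hedged model of the per-object cleanup `restore_refs` performs via `gc_restore_tid` and `gc_clear_unreachable`; the real `gc_restore_tid` recovers the owning thread id from the object's mimalloc segment, and `owner_tid` here is a stand-in for that lookup.

#include <stdint.h>

#define REF_MERGED         0x3  /* assumed flag value, as above */
#define GC_BIT_UNREACHABLE 0x1  /* stand-in for _PyGC_BITS_UNREACHABLE */

typedef struct {
    uintptr_t ob_tid;        /* held the refcount difference during GC */
    intptr_t  ob_ref_shared;
    unsigned  gc_bits;       /* stand-in for the per-object GC flags */
} GcObjModel;

/* Model of restoring one object: put the thread id back (or zero for
 * merged refcounts, per the invariant) and drop the tentative
 * unreachable mark. */
static void
restore_one(GcObjModel *op, uintptr_t owner_tid)
{
    if ((op->ob_ref_shared & 0x3) == REF_MERGED) {
        op->ob_tid = 0;            /* merged: invariant needs tid == 0 */
    }
    else {
        op->ob_tid = owner_tid;    /* unmerged: owner's thread id again */
    }
    op->gc_bits &= ~GC_BIT_UNREACHABLE;
}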
@@ -1066,7 +1112,8 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
     int err = deduce_unreachable_heap(interp, state);
     if (err < 0) {
         _PyEval_StartTheWorld(interp);
-        goto error;
+        PyErr_NoMemory();
+        return;
     }

     // Print debugging information.
@@ -1100,7 +1147,12 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,

     _PyEval_StartTheWorld(interp);
     if (err < 0) {
-        goto error;
+        cleanup_worklist(&state->unreachable);
+        cleanup_worklist(&state->legacy_finalizers);
+        cleanup_worklist(&state->wrcb_to_call);
+        cleanup_worklist(&state->objs_to_decref);
+        PyErr_NoMemory();
+        return;
     }

     // Call tp_clear on objects in the unreachable set. This will cause
@@ -1110,15 +1162,6 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,

     // Append objects with legacy finalizers to the "gc.garbage" list.
     handle_legacy_finalizers(state);
-    return;
-
-error:
-    cleanup_worklist(&state->unreachable);
-    cleanup_worklist(&state->legacy_finalizers);
-    cleanup_worklist(&state->wrcb_to_call);
-    cleanup_worklist(&state->objs_to_decref);
-    PyErr_NoMemory();
-    PyErr_FormatUnraisable("Out of memory during garbage collection");
 }

 /* This is the main function. Read this to understand how the