mirror of https://github.com/python/cpython
gh-115184: Fix refleak tracking issues in free-threaded build (#115188)
Fixes a few issues related to refleak tracking in the free-threaded build:

- Count blocks in abandoned segments.
- Call `_mi_page_free_collect` earlier during heap traversal in order to get an accurate count of blocks in use.
- Add missing refcount tracking in `_Py_DecRefSharedDebug` and `_Py_ExplicitMergeRefcount`.
- Pause threads in `get_num_global_allocated_blocks` to ensure that traversing the mimalloc heaps is safe.
This commit is contained in:
parent 769d444826
commit 31633f4473
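Background for the refcount hunks below: in Py_REF_DEBUG builds the interpreter keeps a running total of refcount operations, and the refleak checker compares that total before and after a test run. Any code path that changes an object's refcount without updating the total shows up as a phantom leak (or a negative leak). The toy model below is not CPython code and the names are illustrative; it only shows the invariant that the `_Py_IncRefTotal`/`_Py_AddRefTotal` calls added in this commit maintain: the debug total must move in lockstep with every refcount adjustment, including the delta folded in during a merge.

/* Toy model (not CPython code) of Py_REF_DEBUG-style bookkeeping. */
#include <stdio.h>

static long ref_total;                 /* stands in for the interpreter-wide debug total */

typedef struct { long local; long shared; } obj_t;   /* split refcount, as in the free-threaded build */

static void incref(obj_t *o)  { o->local++;  ref_total++; }
static void decref(obj_t *o)  { o->shared--; ref_total--; }

/* Merge the split counts, applying an extra delta (cf. _Py_ExplicitMergeRefcount).
 * The `ref_total += extra;` line is the kind of adjustment the commit adds:
 * without it, the total drifts away from the sum of live refcounts. */
static long merge(obj_t *o, long extra)
{
    long refcnt = o->local + o->shared + extra;
    o->local = 0;
    o->shared = refcnt;
    ref_total += extra;                /* keep the debug total in sync */
    return refcnt;
}

int main(void)
{
    obj_t o = { .local = 1, .shared = 0 };   /* freshly created: refcount 1 */
    ref_total = 1;
    incref(&o);
    merge(&o, -1);                     /* e.g. a decref folded into the merge */
    decref(&o);
    /* Object is dead (refcount 0); a balanced total returns to 0 as well. */
    printf("refcount=%ld ref_total=%ld\n", o.shared, ref_total);
    return 0;
}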
@@ -538,7 +538,6 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_
   mi_assert(page != NULL);
   if (page == NULL) return true;

-  _mi_page_free_collect(page,true);
   mi_assert_internal(page->local_free == NULL);
   if (page->used == 0) return true;

@@ -635,6 +634,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_
 typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);

 void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
+  _mi_page_free_collect(page,true);
   const size_t bsize = mi_page_block_size(page);
   const size_t ubsize = mi_page_usable_block_size(page);
   area->reserved = page->reserved * bsize;
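The two hunks above move the `_mi_page_free_collect` call from `_mi_heap_area_visit_blocks` into `_mi_heap_area_init`, so pending thread-local frees are collected before the area statistics are computed; otherwise blocks that were already freed but still sit on an uncollected free list are reported as in use. As a rough, standalone illustration of block counting through mimalloc's public visitor API (this is not the CPython code; it assumes the standalone mimalloc library, and that with its second argument set to true `mi_heap_visit_blocks` calls the visitor once per allocated block):

/* Sketch: count live blocks in the current thread's default mimalloc heap. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <mimalloc.h>

/* Visitor: with visit_blocks=true, mimalloc calls this once per heap area
 * (block == NULL) and once per allocated block (block != NULL). */
static bool
count_block(const mi_heap_t *heap, const mi_heap_area_t *area,
            void *block, size_t block_size, void *arg)
{
    (void)heap; (void)area; (void)block_size;
    if (block != NULL) {
        (*(size_t *)arg)++;       /* count one live block */
    }
    return true;                  /* keep visiting */
}

int main(void)
{
    void *p = mi_malloc(64);      /* one live allocation to find */
    size_t n = 0;
    /* Freshly freed blocks can sit on a page's thread-local free list until
     * they are collected; that is why the commit collects free lists before
     * counting (cf. _mi_page_free_collect above). */
    mi_heap_visit_blocks(mi_heap_get_default(), true, &count_block, &n);
    printf("live blocks: %zu\n", n);
    mi_free(p);
    return 0;
}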
@@ -346,6 +346,9 @@ _Py_DecRefSharedDebug(PyObject *o, const char *filename, int lineno)
     if (should_queue) {
         // TODO: the inter-thread queue is not yet implemented. For now,
         // we just merge the refcount here.
+#ifdef Py_REF_DEBUG
+        _Py_IncRefTotal(_PyInterpreterState_GET());
+#endif
         Py_ssize_t refcount = _Py_ExplicitMergeRefcount(o, -1);
         if (refcount == 0) {
             _Py_Dealloc(o);
@@ -399,10 +402,6 @@ _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra)
     Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
     do {
         refcnt = Py_ARITHMETIC_RIGHT_SHIFT(Py_ssize_t, shared, _Py_REF_SHARED_SHIFT);
-        if (_Py_REF_IS_MERGED(shared)) {
-            return refcnt;
-        }
-
         refcnt += (Py_ssize_t)op->ob_ref_local;
         refcnt += extra;

@@ -410,6 +409,10 @@ _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra)
    } while (!_Py_atomic_compare_exchange_ssize(&op->ob_ref_shared,
                                                &shared, new_shared));

+#ifdef Py_REF_DEBUG
+    _Py_AddRefTotal(_PyInterpreterState_GET(), extra);
+#endif
+
    _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, 0);
    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
    return refcnt;
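For context on the two hunks above: in the free-threaded build each object carries a split (biased) reference count, a thread-local part owned by the creating thread plus an atomic shared part. `_Py_ExplicitMergeRefcount` folds the local count and a caller-supplied `extra` delta into the shared field with a compare-exchange loop, and the new `_Py_AddRefTotal(..., extra)` keeps the debug total consistent with that delta. A simplified, self-contained model of such a merge, with illustrative field names, shift and flag values rather than CPython's actual layout:

/* Simplified model of merging a split refcount; not CPython's implementation. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define REF_SHARED_SHIFT 2   /* low bits hold state flags in this model */
#define REF_MERGED       0x1

typedef struct {
    uint32_t ref_local;            /* owning thread's count, no synchronization */
    _Atomic intptr_t ref_shared;   /* other threads' count << shift, plus flags */
} obj_t;

/* Fold the local count and a caller-supplied delta into the shared field. */
static intptr_t
merge_refcount(obj_t *op, intptr_t extra)
{
    intptr_t shared = atomic_load_explicit(&op->ref_shared, memory_order_relaxed);
    intptr_t refcnt, new_shared;
    do {
        refcnt = (shared >> REF_SHARED_SHIFT)   /* shared count */
                 + (intptr_t)op->ref_local      /* plus the owner's count */
                 + extra;                       /* plus the caller's delta */
        new_shared = (refcnt << REF_SHARED_SHIFT) | REF_MERGED;
    } while (!atomic_compare_exchange_weak_explicit(
                 &op->ref_shared, &shared, new_shared,
                 memory_order_acq_rel, memory_order_relaxed));
    op->ref_local = 0;             /* the object is no longer owner-biased */
    return refcnt;
}

int main(void)
{
    obj_t o = { .ref_local = 3, .ref_shared = 2 << REF_SHARED_SHIFT };
    /* 3 local + 2 shared - 1 = 4 */
    printf("merged refcount: %" PRIdPTR "\n", merge_refcount(&o, -1));
    return 0;
}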
@@ -1073,7 +1073,12 @@ get_mimalloc_allocated_blocks(PyInterpreterState *interp)
             mi_heap_visit_blocks(heap, false, &count_blocks, &allocated_blocks);
         }
     }
-    // TODO(sgross): count blocks in abandoned segments.
+
+    mi_abandoned_pool_t *pool = &interp->mimalloc.abandoned_pool;
+    for (uint8_t tag = 0; tag < _Py_MIMALLOC_HEAP_COUNT; tag++) {
+        _mi_abandoned_pool_visit_blocks(pool, tag, false, &count_blocks,
+                                        &allocated_blocks);
+    }
 #else
     // TODO(sgross): this only counts the current thread's blocks.
     mi_heap_t *heap = mi_heap_get_default();
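Note on the hunk above (context, not part of the diff): in mimalloc, segments whose owning thread exits while some of their blocks are still live are abandoned rather than destroyed, so another thread can later reclaim and free those blocks. The free-threaded build keeps such segments in a per-interpreter abandoned pool, so a count that only walks per-thread heaps undercounts live blocks; the added loop visits the abandoned pool as well.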
@@ -1189,6 +1194,7 @@ get_num_global_allocated_blocks(_PyRuntimeState *runtime)
         }
     }
     else {
+        _PyEval_StopTheWorldAll(&_PyRuntime);
         HEAD_LOCK(runtime);
         PyInterpreterState *interp = PyInterpreterState_Head();
         assert(interp != NULL);
|
@ -1208,6 +1214,7 @@ get_num_global_allocated_blocks(_PyRuntimeState *runtime)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
HEAD_UNLOCK(runtime);
|
HEAD_UNLOCK(runtime);
|
||||||
|
_PyEval_StartTheWorldAll(&_PyRuntime);
|
||||||
#ifdef Py_DEBUG
|
#ifdef Py_DEBUG
|
||||||
assert(got_main);
|
assert(got_main);
|
||||||
#endif
|
#endif
|
||||||
|
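The paired `_PyEval_StopTheWorldAll`/`_PyEval_StartTheWorldAll` calls above make sure no other thread is allocating or freeing while this thread traverses every interpreter's mimalloc heaps. Below is a generic, hedged sketch of that pattern in plain pthreads (not CPython's implementation): worker threads park at safe points while one thread stops the world, walks shared state, then restarts everything. Compile with -pthread.

/* Cooperative stop-the-world sketch; names and structure are illustrative. */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define NWORKERS 2

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static bool world_stopped = false;
static bool shutting_down = false;
static int  parked = 0;

/* Workers call this at safe points (e.g. between allocations). */
static void safe_point(void)
{
    pthread_mutex_lock(&lk);
    while (world_stopped) {
        parked++;
        pthread_cond_broadcast(&cv);   /* tell the traverser we are parked */
        pthread_cond_wait(&cv, &lk);   /* sleep until the world restarts */
        parked--;
    }
    pthread_mutex_unlock(&lk);
}

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&lk);
        bool done = shutting_down;
        pthread_mutex_unlock(&lk);
        if (done) break;
        /* ... mutate this thread's own heap here ... */
        safe_point();
        sched_yield();
    }
    return NULL;
}

static void stop_the_world(void)
{
    pthread_mutex_lock(&lk);
    world_stopped = true;
    while (parked < NWORKERS)          /* wait until every worker is parked */
        pthread_cond_wait(&cv, &lk);
    pthread_mutex_unlock(&lk);
}

static void start_the_world(void)
{
    pthread_mutex_lock(&lk);
    world_stopped = false;
    pthread_cond_broadcast(&cv);
    pthread_mutex_unlock(&lk);
}

int main(void)
{
    pthread_t tids[NWORKERS];
    for (int i = 0; i < NWORKERS; i++)
        pthread_create(&tids[i], NULL, worker, NULL);

    stop_the_world();
    /* Safe to walk every worker's data here: none of them is running. */
    printf("all %d workers parked; traversing...\n", NWORKERS);
    start_the_world();

    pthread_mutex_lock(&lk);
    shutting_down = true;
    pthread_mutex_unlock(&lk);
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(tids[i], NULL);
    return 0;
}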