Mirror of https://github.com/python/cpython
gh-112532: Tag mimalloc heaps and pages (#113742)
* gh-112532: Tag mimalloc heaps and pages

  Mimalloc pages are data structures that contain contiguous allocations
  of the same block size; note that they are distinct from operating
  system pages. Mimalloc pages are contained in segments.

  When a thread exits, it abandons any segments and contained pages that
  have live allocations. These segments and pages may later be reclaimed
  by another thread.

  To support GC and certain thread-safety guarantees in free-threaded
  builds, we want pages to be reclaimed only by the corresponding heap in
  the claimant thread. For example, we want pages containing GC objects
  to be claimed only by GC heaps.

  This commit allows heaps and pages to be tagged with an integer tag
  that ensures abandoned pages are claimed only by heaps with the same
  tag. Heaps can be initialized with a tag (0-15); any page allocated by
  that heap copies the corresponding tag (see the sketch below the
  commit metadata).

* Fix conversion warning
parent eb53750757
commit 0b7476080b
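A minimal sketch of the tag flow described in the commit message, assuming the vendored mimalloc headers and an already-initialized thread-local `mi_tld_t`; `setup_tagged_heap`, `gc_heap`, and the tag value 3 are illustrative, not part of this commit:

#include "mimalloc.h"
#include "mimalloc/internal.h"  // internal API: _mi_heap_init_ex, _mi_arena_id_none

// Assumption: `tld` is this thread's mi_tld_t, set up elsewhere (e.g. by
// _mi_tld_init). Tag 3 is arbitrary; tags must fit the 4-bit field (0-15).
static mi_heap_t gc_heap;

void setup_tagged_heap(mi_tld_t *tld) {
    _mi_heap_init_ex(&gc_heap, tld, _mi_arena_id_none(),
                     /*no_reclaim=*/false, /*tag=*/3);

    void *obj = mi_heap_malloc(&gc_heap, 64);
    // The page backing `obj` copied the heap's tag in mi_page_init
    // (page->tag = heap->tag). If this thread exits while `obj` is live,
    // the abandoned page can only be reclaimed by a heap whose tag is
    // also 3 in the claiming thread (mi_heap_by_tag in mi_segment_reclaim).
    (void)obj;
}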
Include/internal/mimalloc/mimalloc/internal.h

@@ -155,7 +155,7 @@ size_t _mi_bin_size(uint8_t bin); // for stats
 uint8_t _mi_bin(size_t size); // for stats
 
 // "heap.c"
-void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id);
+void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag);
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
Include/internal/mimalloc/mimalloc/types.h

@@ -311,6 +311,7 @@ typedef struct mi_page_s {
   uint32_t slice_offset;  // distance from the actual page data slice (0 if a page)
   uint8_t  is_committed : 1;  // `true` if the page virtual memory is committed
   uint8_t  is_zero_init : 1;  // `true` if the page was initially zero initialized
+  uint8_t  tag : 4;           // tag from the owning heap
 
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t capacity;  // number of blocks committed, must be the first field, see `segment.c:page_clear`
@@ -551,6 +552,7 @@ struct mi_heap_s {
   size_t     page_retired_max;  // largest retired index into the `pages` array.
   mi_heap_t* next;              // list of heaps per thread
   bool       no_reclaim;        // `true` if this heap should not reclaim abandoned pages
+  uint8_t    tag;               // custom identifier for this heap
 };
 
 
Objects/mimalloc/heap.c

@@ -209,7 +209,7 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }
 
-void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id)
+void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
 {
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = tld;
@@ -224,17 +224,19 @@ void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id)
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
+  heap->no_reclaim = no_reclaim;
+  heap->tag = tag;
+  // push on the thread local heaps list
+  heap->next = heap->tld->heaps;
+  heap->tld->heaps = heap;
 }
 
 mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap == NULL) return NULL;
-  _mi_heap_init_ex(heap, bheap->tld, arena_id);
-  heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
-  // push on the thread local heaps list
-  heap->next = heap->tld->heaps;
-  heap->tld->heaps = heap;
+  // don't reclaim abandoned pages or otherwise destroy is unsafe
+  _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
   return heap;
 }
 
Objects/mimalloc/init.c

@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
-  0, false, false, false,
+  0, false, false, false, 0,
   0,       // capacity
   0,       // reserved capacity
   { 0 },   // flags
@@ -121,7 +121,8 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   0,                // page count
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next
-  false
+  false,
+  0
 };
 
 #define tld_empty_stats  ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
@@ -298,7 +299,7 @@ static bool _mi_heap_init(void) {
     if (td == NULL) return false;
 
     _mi_tld_init(&td->tld, &td->heap);
-    _mi_heap_init_ex(&td->heap, &td->tld, _mi_arena_id_none());
+    _mi_heap_init_ex(&td->heap, &td->tld, _mi_arena_id_none(), false, 0);
     _mi_heap_set_default_direct(&td->heap);
   }
   return false;
@@ -311,7 +312,6 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
   tld->segments.abandoned = &_mi_abandoned_default;
   tld->os.stats = &tld->stats;
   tld->heap_backing = bheap;
-  tld->heaps = bheap;
 }
 
 // Free the thread local default heap (called from `mi_thread_done`)
Objects/mimalloc/page.c

@@ -660,6 +660,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(block_size > 0);
   // set fields
   mi_page_set_heap(page, heap);
+  page->tag = heap->tag;
   page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE);  // initialize before _mi_segment_page_start
   size_t page_size;
   const void* page_start = _mi_segment_page_start(segment, page, &page_size);
Objects/mimalloc/segment.c

@@ -1299,6 +1299,18 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
   return has_page;
 }
 
+static mi_heap_t* mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
+  if (heap->tag == tag) {
+    return heap;
+  }
+  for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
+    if (curr->tag == tag) {
+      return curr;
+    }
+  }
+  return NULL;
+}
+
 // Reclaim an abandoned segment; returns NULL if the segment was freed
 // set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
 static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
@@ -1321,6 +1333,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     if (mi_slice_is_used(slice)) {
       // in use: reclaim the page in our heap
       mi_page_t* page = mi_slice_to_page(slice);
+      mi_heap_t* target_heap = mi_heap_by_tag(heap, page->tag);
       mi_assert_internal(page->is_committed);
       mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
       mi_assert_internal(mi_page_heap(page) == NULL);
@@ -1328,7 +1341,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
       segment->abandoned--;
       // set the heap again and allow delayed free again
-      mi_page_set_heap(page, heap);
+      mi_page_set_heap(page, target_heap);
       _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true);  // override never (after heap is set)
       _mi_page_free_collect(page, false);  // ensure used count is up to date
       if (mi_page_all_free(page)) {
@@ -1337,8 +1350,9 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       }
       else {
         // otherwise reclaim it into the heap
-        _mi_page_reclaim(heap, page);
-        if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
+        _mi_page_reclaim(target_heap, page);
+        if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
+            heap == target_heap) {
           if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
         }
       }
Python/pystate.c

@@ -2539,8 +2539,8 @@ tstate_mimalloc_bind(PyThreadState *tstate)
     tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
 
     // Initialize each heap
-    for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
-        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none());
+    for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
+        _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
     }
 
     // By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
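For orientation, a hedged sketch of the invariant the loop above establishes; `check_heap_tags` is hypothetical (not in the commit), and the GC/object heap naming follows the `_Py_MIMALLOC_HEAP_OBJECT` comment in the diff:

#include <assert.h>
#include <stdint.h>
#include "mimalloc.h"
#include "mimalloc/internal.h"  // mi_heap_t's fields (including tag) are internal

// Hypothetical sanity check: tstate_mimalloc_bind() gives heap i tag i,
// so an abandoned page from e.g. the GC heap of a dead thread is routed
// by mi_heap_by_tag() back to the claiming thread's heap of the same
// index, never to its object or raw-memory heap.
static void check_heap_tags(mi_heap_t heaps[], uint8_t count) {
    for (uint8_t i = 0; i < count; i++) {
        assert(heaps[i].tag == i);  // tag set by _mi_heap_init_ex(..., false, i)
    }
}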