/*----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"  // mi_prim_get_default_heap

#include <string.h>  // memset, memcpy

#if defined(_MSC_VER) && (_MSC_VER < 1920)
#pragma warning(disable:4204)  // non-constant aggregate initializer
#endif


/* -----------------------------------------------------------
  Helpers
----------------------------------------------------------- */

// return `true` if ok, `false` to break
typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);

// Visit all pages in a heap; returns `false` if break was called.
static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
{
  if (heap==NULL || heap->page_count==0) return true;

  // visit all pages
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_t* page = pq->first;
    while(page != NULL) {
      mi_page_t* next = page->next; // save next in case the page gets removed from the queue
      mi_assert_internal(mi_page_heap(page) == heap);
      #if MI_DEBUG>1
      count++;
      #endif
      if (!fn(heap, pq, page, arg1, arg2)) return false;
      page = next; // and continue
    }
  }
  mi_assert_internal(count == total);
  return true;
}

#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(pq);
  mi_assert_internal(mi_page_heap(page) == heap);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert_internal(segment->thread_id == heap->thread_id);
  mi_assert_expensive(_mi_page_is_valid(page));
  return true;
}
#endif

#if MI_DEBUG>=3
static bool mi_heap_is_valid(mi_heap_t* heap) {
  mi_assert_internal(heap!=NULL);
  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
  return true;
}
#endif


/* -----------------------------------------------------------
  "Collect" pages by migrating `local_free` and `thread_free`
  lists and freeing empty pages. This is done when a thread
  stops (and in that case abandons pages if there are still
  blocks alive)
----------------------------------------------------------- */

typedef enum mi_collect_e {
  MI_NORMAL,
  MI_FORCE,
  MI_ABANDON
} mi_collect_t;
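
// The three collection strengths (summarizing the code in `mi_heap_collect_ex`
// below): MI_NORMAL migrates free lists and frees empty pages, MI_FORCE
// additionally purges segment-local caches (and arenas on the main thread),
// and MI_ABANDON is used when a thread terminates so that pages that still
// contain live blocks are abandoned for other threads to reclaim later.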

static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.
    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
    if (!freed && collect == MI_ABANDON) {
      _mi_page_abandon(page, pq);
    }
  }
  else if (collect == MI_ABANDON) {
    // still used blocks but the thread is done; abandon the page
    _mi_page_abandon(page, pq);
  }
  return true; // don't break
}

static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
  return true; // don't break
}

static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  const bool force = collect >= MI_FORCE;
  _mi_deferred_free(heap, force);

  // gh-112532: we may be called from a thread that is not the owner of the heap
  bool is_main_thread = _mi_is_main_thread() && heap->thread_id == _mi_thread_id();

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  const bool force_main =
    #ifdef NDEBUG
      collect == MI_FORCE
    #else
      collect >= MI_FORCE
    #endif
      && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
  }

  // if abandoning, mark all pages to no longer add to delayed_free
  if (collect == MI_ABANDON) {
    mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
  }

  // free all current thread delayed blocks.
  // (if abandoning, after this there are no more thread-delayed references into the pages.)
  _mi_heap_delayed_free_all(heap);

  // collect retired pages
  _mi_heap_collect_retired(heap, force);

  // free pages that were delayed with QSBR
  _PyMem_mi_heap_collect_qsbr(heap);

  // collect all pages owned by this thread
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // collect segment local caches
  if (force) {
    _mi_segment_thread_collect(&heap->tld->segments);
  }

  // collect regions on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
    _mi_arena_collect(true /* force purge */, &heap->tld->stats);
  }
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {
  mi_heap_collect_ex(heap, MI_ABANDON);
}

void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
  mi_heap_collect_ex(heap, (force ? MI_FORCE : MI_NORMAL));
}

void mi_collect(bool force) mi_attr_noexcept {
  mi_heap_collect(mi_prim_get_default_heap(), force);
}


/* -----------------------------------------------------------
  Heap new
----------------------------------------------------------- */

mi_heap_t* mi_heap_get_default(void) {
  mi_thread_init();
  return mi_prim_get_default_heap();
}

static bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_prim_get_default_heap());
}

mi_heap_t* mi_heap_get_backing(void) {
  mi_heap_t* heap = mi_heap_get_default();
  mi_assert_internal(heap!=NULL);
  mi_heap_t* bheap = heap->tld->heap_backing;
  mi_assert_internal(bheap!=NULL);
  mi_assert_internal(bheap->thread_id == _mi_thread_id());
  return bheap;
}

void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
{
  _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
  heap->tld = tld;
  heap->thread_id = _mi_thread_id();
  heap->arena_id = arena_id;
  if (heap == tld->heap_backing) {
    _mi_random_init(&heap->random);
  }
  else {
    _mi_random_split(&tld->heap_backing->random, &heap->random);
  }
  heap->cookie = _mi_heap_random_next(heap) | 1;
  heap->keys[0] = _mi_heap_random_next(heap);
  heap->keys[1] = _mi_heap_random_next(heap);
  heap->no_reclaim = no_reclaim;
  heap->tag = tag;
  // push on the thread local heaps list
  heap->next = heap->tld->heaps;
  heap->tld->heaps = heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
  if (heap == NULL) return NULL;
  // don't reclaim abandoned pages, otherwise `mi_heap_destroy` is unsafe
  _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
  return heap;
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  return mi_heap_new_in_arena(_mi_arena_id_none());
}

bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
  return _mi_random_next(&heap->random);
}

// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  // TODO: copy full empty heap instead?
  memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
  _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
  heap->thread_delayed_free = NULL;
  heap->page_count = 0;
}

// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert_internal(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (mi_heap_is_backing(heap)) return; // don't free the backing heap

  // reset default
  if (mi_heap_is_default(heap)) {
    _mi_heap_set_default_direct(heap->tld->heap_backing);
  }

  // remove ourselves from the thread local heaps list
  // linear search but we expect the number of heaps to be relatively small
  mi_heap_t* prev = NULL;
  mi_heap_t* curr = heap->tld->heaps;
  while (curr != heap && curr != NULL) {
    prev = curr;
    curr = curr->next;
  }
  mi_assert_internal(curr == heap);
  if (curr == heap) {
    if (prev != NULL) { prev->next = heap->next; }
    else { heap->tld->heaps = heap->next; }
  }
  mi_assert_internal(heap->tld->heaps != NULL);

  // and free the used memory
  mi_free(heap);
}


/* -----------------------------------------------------------
  Heap destroy
----------------------------------------------------------- */

static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
  MI_UNUSED(arg1);
  MI_UNUSED(arg2);
  MI_UNUSED(heap);
  MI_UNUSED(pq);

  // ensure no more thread_delayed_free will be added
  _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
      mi_heap_stat_decrease(heap, large, bsize);
    }
    else {
      mi_heap_stat_decrease(heap, huge, bsize);
    }
  }
  #if (MI_STAT)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
  #endif

  /// pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
  page->used = 0;

  // and free the page
  // mi_page_free(page,false);
  page->next = NULL;
  page->prev = NULL;
  _mi_segment_page_free(page, false /* no force? */, &heap->tld->segments);

  return true; // keep going
}

void _mi_heap_destroy_pages(mi_heap_t* heap) {
  mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL);
  mi_heap_reset_pages(heap);
}

#if MI_TRACK_HEAP_DESTROY
static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
  MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
  mi_track_free_size(block, mi_usable_size(block));
  return true;
}
#endif

void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
  if (!heap->no_reclaim) {
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }
  else {
    // track all blocks as freed
    #if MI_TRACK_HEAP_DESTROY
    mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
    #endif
    // free all pages
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
}

// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
  mi_heap_t* bheap = mi_heap_get_backing();
  mi_heap_t* curr = bheap->tld->heaps;
  while (curr != NULL) {
    mi_heap_t* next = curr->next;
    if (curr->no_reclaim) {
      mi_heap_destroy(curr);
    }
    else {
      _mi_heap_destroy_pages(curr);
    }
    curr = next;
  }
}


/* -----------------------------------------------------------
  Safe Heap delete
----------------------------------------------------------- */

// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free_partial(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
  // so after this only the new heap will get delayed frees
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer points to `from`;
  // it turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block`, which is safe.
  _mi_heap_delayed_free_all(from);
  #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
  mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
  #endif

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

// Safe delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;

  if (!mi_heap_is_backing(heap)) {
    // transfer still used pages to the backing heap
    mi_heap_absorb(heap->tld->heap_backing, heap);
  }
  else {
    // the backing heap abandons its pages
    _mi_heap_collect_abandon(heap);
  }
  mi_assert_internal(heap->page_count==0);
  mi_heap_free(heap);
}
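
// Note (a summary of the behavior above, not new semantics): `mi_heap_delete`
// keeps outstanding allocations valid by moving the heap's pages to the
// backing heap (or, for the backing heap itself, abandoning them), whereas
// `mi_heap_destroy` frees every block at once and is only safe for heaps that
// never reclaimed abandoned pages (`no_reclaim`, as set by `mi_heap_new`).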

mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
  mi_assert(heap != NULL);
  mi_assert(mi_heap_is_initialized(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
  mi_assert_expensive(mi_heap_is_valid(heap));
  mi_heap_t* old = mi_prim_get_default_heap();
  _mi_heap_set_default_direct(heap);
  return old;
}
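
// Illustrative usage sketch: temporarily redirect this thread's default
// allocations into a private heap and restore the previous default afterwards:
//
//   mi_heap_t* h   = mi_heap_new();
//   mi_heap_t* old = mi_heap_set_default(h);   // mi_malloc now allocates in `h`
//   // ... allocate ...
//   mi_heap_set_default(old);
//   mi_heap_delete(h);   // outstanding blocks migrate to the backing heap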


/* -----------------------------------------------------------
  Analysis
----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
  if (p == NULL) return NULL;
  mi_segment_t* segment = _mi_ptr_segment(p);
  bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(valid);
  if mi_unlikely(!valid) return NULL;
  return mi_page_heap(_mi_segment_page_of(segment,p));
}

bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  return (heap == mi_heap_of_block(p));
}

static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  bool* found = (bool*)vfound;
  mi_segment_t* segment = _mi_page_segment(page);
  void* start = _mi_page_start(segment, page, NULL);
  void* end   = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
  *found = (p >= start && p < end);
  return (!*found); // continue if not found
}

bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
  mi_assert(heap != NULL);
  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
  if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false;  // only aligned pointers
  bool found = false;
  mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
  return found;
}

bool mi_check_owned(const void* p) {
  return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}


/* -----------------------------------------------------------
  Visit all heap blocks and areas
  Todo: enable visiting abandoned pages, and
        enable visiting all blocks of all heaps across threads
----------------------------------------------------------- */

// Separate struct to keep `mi_page_t` out of the public interface
typedef struct mi_heap_area_ex_s {
  mi_heap_area_t area;
  mi_page_t* page;
} mi_heap_area_ex_t;

static void mi_fast_divisor(size_t divisor, size_t* magic, size_t* shift) {
  mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
  *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);
  *magic = (size_t)(((1ULL << 32) * ((1ULL << *shift) - divisor)) / divisor + 1);
}

static size_t mi_fast_divide(size_t n, size_t magic, size_t shift) {
  mi_assert_internal(n <= UINT32_MAX);
  return ((((uint64_t) n * magic) >> 32) + n) >> shift;
}
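
// Worked example (illustrative, not executed): for a block size of 48,
// `mi_fast_divisor` computes shift = 6 (since 2^6 = 64 >= 48) and
// magic = (2^32 * (64 - 48)) / 48 + 1 = 1431655766. An offset of 480 is then
// divided without a hardware divide:
//   ((480 * 1431655766) >> 32) = 160,  and  (160 + 480) >> 6 = 10 = 480 / 48.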

bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) {
  mi_assert(area != NULL);
  if (area==NULL) return true;
  mi_assert(page != NULL);
  if (page == NULL) return true;

  mi_assert_internal(page->local_free == NULL);
  if (page->used == 0) return true;

  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);  // without padding
  size_t psize;
  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_heap_t* heap = mi_page_heap(page);

  if (page->capacity == 1) {
    // optimize page with one block
    mi_assert_internal(page->used == 1 && page->free == NULL);
    return visitor(heap, area, pstart, ubsize, arg);
  }

  if (page->used == page->capacity) {
    // optimize full pages
    uint8_t* block = pstart;
    for (size_t i = 0; i < page->capacity; i++) {
      if (!visitor(heap, area, block, ubsize, arg)) return false;
      block += bsize;
    }
    return true;
  }

  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
  size_t bmapsize = (page->capacity + MI_INTPTR_BITS - 1) / MI_INTPTR_BITS;
  memset(free_map, 0, bmapsize * sizeof(uintptr_t));

  if (page->capacity % MI_INTPTR_BITS != 0) {
    size_t shift = (page->capacity % MI_INTPTR_BITS);
    uintptr_t mask = (UINTPTR_MAX << shift);
    free_map[bmapsize - 1] = mask;
  }

  // fast repeated division by the block size
  size_t magic, shift;
  mi_fast_divisor(bsize, &magic, &shift);

  #if MI_DEBUG>1
  size_t free_count = 0;
  #endif
  for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
    #if MI_DEBUG>1
    free_count++;
    #endif
    mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
    size_t offset = (uint8_t*)block - pstart;
    mi_assert_internal(offset % bsize == 0);
    size_t blockidx = mi_fast_divide(offset, magic, shift);
    mi_assert_internal(blockidx == offset / bsize);
    mi_assert_internal(blockidx < MI_MAX_BLOCKS);
    size_t bitidx = (blockidx / MI_INTPTR_BITS);
    size_t bit = blockidx - (bitidx * MI_INTPTR_BITS);
    free_map[bitidx] |= ((uintptr_t)1 << bit);
  }
  mi_assert_internal(page->capacity == (free_count + page->used));

  // walk through all blocks skipping the free ones
  #if MI_DEBUG>1
  size_t used_count = 0;
  #endif
  uint8_t* block = pstart;
  for (size_t i = 0; i < bmapsize; i++) {
    if (free_map[i] == 0) {
      // every block is in use
      for (size_t j = 0; j < MI_INTPTR_BITS; j++) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        if (!visitor(heap, area, block, ubsize, arg)) return false;
        block += bsize;
      }
    }
    else {
      uintptr_t m = ~free_map[i];
      while (m) {
        #if MI_DEBUG>1
        used_count++;
        #endif
        size_t bitidx = mi_ctz(m);
        if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false;
        m &= m - 1;
      }
      block += bsize * MI_INTPTR_BITS;
    }
  }
  mi_assert_internal(page->used == used_count);
  return true;
}

typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg);

void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
  _mi_page_free_collect(page, true);
  const size_t bsize = mi_page_block_size(page);
  const size_t ubsize = mi_page_usable_block_size(page);
  area->reserved = page->reserved * bsize;
  area->committed = page->capacity * bsize;
  area->blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
  area->used = page->used; // number of blocks in use (#553)
  area->block_size = ubsize;
  area->full_block_size = bsize;
}

static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
  MI_UNUSED(heap);
  MI_UNUSED(pq);
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  xarea.page = page;
  _mi_heap_area_init(&xarea.area, page);
  return fun(heap, &xarea, arg);
}

// Visit all heap pages as areas
static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) {
  if (visitor == NULL) return false;
  return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{
}

// Just to pass arguments
typedef struct mi_visit_blocks_args_s {
  bool visit_blocks;
  mi_block_visit_fun* visitor;
  void* arg;
} mi_visit_blocks_args_t;

static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) {
  mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg;
  if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false;
  if (args->visit_blocks) {
    return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg);
  }
  else {
    return true;
  }
}

// Visit all blocks in a heap
bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
  _mi_heap_delayed_free_partial((mi_heap_t *)heap);
  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}
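
// Illustrative usage sketch (with a hypothetical `count_blocks` visitor):
// count the live blocks in a heap. The visitor is called once per area with
// `block == NULL`, and then once per live block when `visit_blocks` is true:
//
//   static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
//                            void* block, size_t block_size, void* arg) {
//     if (block != NULL) { (*(size_t*)arg)++; }
//     return true;  // keep visiting
//   }
//
//   size_t count = 0;
//   mi_heap_visit_blocks(heap, true, &count_blocks, &count);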