HAL_ChibiOS: added linked list of all memory allocations

check all allocations for corruption on free() and malloc_check()
Andrew Tridgell 2020-11-14 08:48:13 +11:00 committed by Peter Barker
parent 9a026b8068
commit 95a823a702
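
Note on overhead: with the layout introduced below, each guarded allocation carries a fixed cost of MALLOC_HEAD_SIZE (the 32-byte struct memguard header) plus two canary regions of MALLOC_GUARD_SIZE = DMA_ALIGNMENT bytes each. Assuming DMA_ALIGNMENT is 32 bytes (typical for the STM32 targets; stated here as an assumption), that works out to 32 + 2*32 = 96 bytes per allocation, in addition to the requested size being rounded up for alignment.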


@@ -46,6 +46,10 @@ static const struct memory_region {
     uint32_t flags;
 } memory_regions[] = { HAL_MEMORY_REGIONS };
 
+#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
+static mutex_t mem_mutex;
+#endif
+
 // the first memory region is already setup as the ChibiOS
 // default heap, so we will index from 1 in the allocators
 #define NUM_MEMORY_REGIONS (sizeof(memory_regions)/sizeof(memory_regions[0]))
@@ -76,6 +80,10 @@ static memory_heap_t dma_reserve_heap;
  */
 void malloc_init(void)
 {
+#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
+    chMtxObjectInit(&mem_mutex);
+#endif
+
     uint8_t i;
     for (i=1; i<NUM_MEMORY_REGIONS; i++) {
         chHeapObjectInit(&heaps[i], memory_regions[i].address, memory_regions[i].size);
@@ -180,8 +188,22 @@ found:
 }
 
 #ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
+/*
+  memory guard system. We put all allocated memory in a doubly linked
+  list and add canary bytes at the front and back of all
+  allocations. On all free operations, plus on calls to malloc_check()
+  we walk the list and check for memory corruption, flagging an
+  internal error if one is found
+ */
+struct memguard {
+    uint32_t size;
+    uint32_t inv_size;
+    struct memguard *next, *prev;
+    uint32_t pad[4]; // pad to 32 bytes
+};
+static struct memguard *mg_head;
 
-#define MALLOC_HEAD_SIZE DMA_ALIGNMENT
+#define MALLOC_HEAD_SIZE sizeof(struct memguard)
 #define MALLOC_GUARD_SIZE DMA_ALIGNMENT
 #define MALLOC_GUARD1_START 73
 #define MALLOC_GUARD2_START 172
@@ -191,6 +213,8 @@ found:
  */
 static void *malloc_flags_guard(size_t size, uint32_t flags)
 {
+    chMtxLock(&mem_mutex);
+
     if (flags & (MEM_REGION_FLAG_DMA_OK | MEM_REGION_FLAG_SDCARD)) {
         size = (size + (DMA_ALIGNMENT-1U)) & ~(DMA_ALIGNMENT-1U);
     } else {
@@ -198,17 +222,26 @@ static void *malloc_flags_guard(size_t size, uint32_t flags)
     }
     void *ret = malloc_flags(size+MALLOC_GUARD_SIZE*2+MALLOC_HEAD_SIZE, flags);
     if (!ret) {
+        chMtxUnlock(&mem_mutex);
         return NULL;
     }
-    uint32_t *bu = (uint32_t *)ret;
-    uint8_t *b1 = ((uint8_t *)bu) + MALLOC_HEAD_SIZE;
+    struct memguard *mg = (struct memguard *)ret;
+    uint8_t *b1 = (uint8_t *)&mg[1];
     uint8_t *b2 = b1 + MALLOC_GUARD_SIZE + size;
-    bu[0] = size;
-    bu[1] = ~size;
+    mg->size = size;
+    mg->inv_size = ~size;
     for (uint32_t i=0; i<MALLOC_GUARD_SIZE; i++) {
         b1[i] = (uint8_t)(MALLOC_GUARD1_START + i);
         b2[i] = (uint8_t)(MALLOC_GUARD2_START + i);
     }
 
+    if (mg_head != NULL) {
+        mg->next = mg_head;
+        mg_head->prev = mg;
+    }
+    mg_head = mg;
+
+    chMtxUnlock(&mem_mutex);
     return (void *)(b1+MALLOC_GUARD_SIZE);
 }
@@ -217,24 +250,15 @@ extern void AP_memory_guard_error(uint32_t size);
 /*
   check for errors in malloc memory using guard bytes
  */
-void malloc_check(const void *p)
+void malloc_check_mg(const struct memguard *mg)
 {
-    if (p == NULL) {
-        return;
-    }
-    if (((uintptr_t)p) & 3) {
-        // misaligned memory
+    if (mg->size != ~mg->inv_size) {
         AP_memory_guard_error(0);
         return;
     }
-    const volatile uint32_t *bu = (uint32_t *)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE));
-    if (bu[1] != ~bu[0]) {
-        AP_memory_guard_error(0);
-        return;
-    }
-    const uint32_t size = bu[0];
-    const volatile uint8_t *b1 = ((uint8_t *)bu) + MALLOC_HEAD_SIZE;
-    const volatile uint8_t *b2 = b1 + MALLOC_GUARD_SIZE + size;
+    const uint32_t size = mg->size;
+    const uint8_t *b1 = (uint8_t *)&mg[1];
+    const uint8_t *b2 = b1 + MALLOC_GUARD_SIZE + size;
     for (uint32_t i=0; i<MALLOC_GUARD_SIZE; i++) {
         if (b1[i] != (uint8_t)(MALLOC_GUARD1_START + i) ||
             b2[i] != (uint8_t)(MALLOC_GUARD2_START + i)) {
@@ -244,10 +268,56 @@ void malloc_check(const void *p)
     }
 }
 
+/*
+  check for errors across entire allocation list
+ */
+void malloc_check_all(void)
+{
+    for (struct memguard *mg=mg_head; mg; mg=mg->next) {
+        malloc_check_mg(mg);
+    }
+}
+
+/*
+  check for errors in malloc memory using guard bytes
+ */
+void malloc_check(const void *p)
+{
+    if (p == NULL) {
+        // allow for malloc_check(nullptr) to check all allocated memory
+        chMtxLock(&mem_mutex);
+        malloc_check_all();
+        chMtxUnlock(&mem_mutex);
+        return;
+    }
+    if (((uintptr_t)p) & 3) {
+        // misaligned memory
+        AP_memory_guard_error(0);
+        return;
+    }
+    chMtxLock(&mem_mutex);
+    struct memguard *mg = (struct memguard *)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE));
+    malloc_check_mg(mg);
+    malloc_check_all();
+    chMtxUnlock(&mem_mutex);
+}
+
 static void free_guard(void *p)
 {
+    chMtxLock(&mem_mutex);
     malloc_check(p);
+    struct memguard *mg = (struct memguard *)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE));
+    if (mg->next) {
+        mg->next->prev = mg->prev;
+    }
+    if (mg->prev) {
+        mg->prev->next = mg->next;
+    }
+    if (mg == mg_head) {
+        mg_head = mg->next;
+    }
     chHeapFree((void*)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE)));
+    chMtxUnlock(&mem_mutex);
 }
 
 #define malloc_flags(size, flags) malloc_flags_guard(size, flags)
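
Taken together, the changes above keep every live allocation on a doubly linked list headed by mg_head, bracket each payload with canary bytes, and re-verify the whole list on free() and on malloc_check(nullptr). The following is a minimal host-side sketch of that scheme, for illustration only: the names guarded_malloc, guarded_free, check_one, check_all and GUARD_SIZE are invented for the example, plain calloc()/free() stand in for the ChibiOS heap, assert() stands in for AP_memory_guard_error(), and the mem_mutex locking is omitted because the sketch is single threaded.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GUARD_SIZE   32u    /* stands in for MALLOC_GUARD_SIZE (DMA_ALIGNMENT) */
#define GUARD1_START 73     /* canary patterns, same values as the diff */
#define GUARD2_START 172

/* header kept in front of every allocation, mirroring struct memguard above */
struct memguard {
    uint32_t size, inv_size;        /* requested size and its bitwise inverse */
    struct memguard *next, *prev;   /* doubly linked list of live allocations */
};

static struct memguard *mg_head;    /* newest allocation sits at the head */

/* allocate [header][front canary][payload][back canary] and link it in */
static void *guarded_malloc(size_t size)
{
    struct memguard *mg = calloc(1, sizeof(*mg) + 2*GUARD_SIZE + size);
    if (!mg) {
        return NULL;
    }
    uint8_t *g1 = (uint8_t *)&mg[1];
    uint8_t *g2 = g1 + GUARD_SIZE + size;
    mg->size = (uint32_t)size;
    mg->inv_size = ~(uint32_t)size;
    for (uint32_t i = 0; i < GUARD_SIZE; i++) {
        g1[i] = (uint8_t)(GUARD1_START + i);
        g2[i] = (uint8_t)(GUARD2_START + i);
    }
    if (mg_head) {
        mg->next = mg_head;
        mg_head->prev = mg;
    }
    mg_head = mg;
    return g1 + GUARD_SIZE;
}

/* verify one allocation: header consistency plus both canary regions */
static void check_one(const struct memguard *mg)
{
    assert(mg->size == ~mg->inv_size);
    const uint8_t *g1 = (const uint8_t *)&mg[1];
    const uint8_t *g2 = g1 + GUARD_SIZE + mg->size;
    for (uint32_t i = 0; i < GUARD_SIZE; i++) {
        assert(g1[i] == (uint8_t)(GUARD1_START + i));
        assert(g2[i] == (uint8_t)(GUARD2_START + i));
    }
}

/* walk every live allocation, the equivalent of malloc_check(nullptr) */
static void check_all(void)
{
    for (struct memguard *mg = mg_head; mg; mg = mg->next) {
        check_one(mg);
    }
}

/* check the block, unlink it from the list, then release the raw memory */
static void guarded_free(void *p)
{
    struct memguard *mg = (struct memguard *)((uint8_t *)p - GUARD_SIZE - sizeof(*mg));
    check_one(mg);
    if (mg->next) {
        mg->next->prev = mg->prev;
    }
    if (mg->prev) {
        mg->prev->next = mg->next;
    }
    if (mg == mg_head) {
        mg_head = mg->next;
    }
    free(mg);
}

int main(void)
{
    char *a = guarded_malloc(100);
    char *b = guarded_malloc(200);
    memset(a, 0x55, 100);   /* in-bounds writes leave the canaries intact */
    check_all();            /* asserts if any live allocation is corrupted */
    guarded_free(a);
    guarded_free(b);
    return 0;
}

The size/inv_size pair catches a trashed header before the canaries are even inspected, and because the list node is the allocation header itself, the guard system adds no allocations of its own.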