/*
 * Copyright (C) Siddharth Bharat Purohit 2017
 * This file is free software: you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
  wrappers for allocation functions

  Relies on linker wrap options

  Note that not all functions that have been wrapped are implemented
  here. The others are wrapped to ensure the function is not used
  without an implementation. If we need them then we can implement them
  as needed.
 */

#include <stdio.h>
#include <string.h>
#include <hal.h>
#include <ch.h>
#include <stdarg.h>
#include <stdint.h>
#include "stm32_util.h"

#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
#pragma GCC optimize("Og")
#endif

#define MEM_REGION_FLAG_DMA_OK 1
#define MEM_REGION_FLAG_FAST 2
#define MEM_REGION_FLAG_AXI_BUS 4
#define MEM_REGION_FLAG_ETH_SAFE 8
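
// these flags describe what a memory region can be used for: DMA_OK marks
// memory reachable by the general-purpose DMA controllers, FAST marks
// low-latency memory (e.g. CCM or TCM), AXI_BUS marks STM32H7 AXI SRAM and
// ETH_SAFE marks memory usable for ethernet DMA descriptors and buffers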

#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
static mutex_t mem_mutex;
#endif

static const struct memory_region memory_regions[] = { HAL_MEMORY_REGIONS };
// the first memory region is already set up as the ChibiOS
// default heap, so we will index from 1 in the allocators
#define NUM_MEMORY_REGIONS (sizeof(memory_regions)/sizeof(memory_regions[0]))
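
// HAL_MEMORY_REGIONS above is provided by each board's hwdef. As a purely
// hypothetical illustration (these values are not from any real board), a
// two-region board might expand it to:
//   { (void*)0x20000400, 0x30000, MEM_REGION_FLAG_DMA_OK },
//   { (void*)0x10000000, 0x10000, MEM_REGION_FLAG_FAST }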

#if CH_CFG_USE_HEAP == TRUE

static memory_heap_t heaps[NUM_MEMORY_REGIONS];

#define MIN_ALIGNMENT 8U
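
// DMA-capable allocations need stricter alignment on the STM32H7: the
// Cortex-M7 data cache line is 32 bytes, so DMA buffers must be
// cache-line aligned for cache clean/invalidate to work without touching
// neighbouring data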
#if defined(STM32H7)
#define DMA_ALIGNMENT 32U
#else
#define DMA_ALIGNMENT 8U
#endif

// size of memory reserved for dma-capable alloc
#ifndef DMA_RESERVE_SIZE
#define DMA_RESERVE_SIZE 6144
#endif

#if DMA_RESERVE_SIZE != 0
static memory_heap_t dma_reserve_heap;
#endif

/*
  initialise memory handling
 */
void malloc_init(void)
{
#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
    chMtxObjectInit(&mem_mutex);
#endif

#if defined(STM32H7)
    // zero first 1k of ITCM. We leave 1k free to avoid addresses
    // close to nullptr being valid. Zeroing it here means we can
    // check for changes which indicate a write to an uninitialised
    // object. We start at address 0x1 as writing the first byte
    // causes a fault
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#if defined(__GNUC__) && __GNUC__ >= 10
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
    memset((void*)0x00000001, 0, 1023);
#pragma GCC diagnostic pop
#endif

    uint8_t i;
    for (i=1; i<NUM_MEMORY_REGIONS; i++) {
        chHeapObjectInit(&heaps[i], memory_regions[i].address, memory_regions[i].size);
    }

#if DMA_RESERVE_SIZE != 0
    /*
      create a DMA reserve heap, to ensure we keep some memory for
      DMA-safe memory allocations
     */
    uint32_t reserve_size = DMA_RESERVE_SIZE;
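    // try progressively smaller reserves until one fits: each failed
    // attempt shrinks the request to 7/8 of the previous size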
    while (reserve_size > 0) {
        void *dma_reserve = malloc_dma(reserve_size);
        if (dma_reserve != NULL) {
            chHeapObjectInit(&dma_reserve_heap, dma_reserve, reserve_size);
            break;
        }
        reserve_size = (reserve_size * 7) / 8;
    }
#endif
}

/*
  allocate memory, using flags from MEM_REGION_FLAG_* to determine
  memory type
 */
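/*
  the search order below is: the default heap first when the flags
  permit it, then regions whose flags match the request, then any heap
  for non-DMA requests, and finally the DMA reserve heap
 */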
static void *malloc_flags(size_t size, uint32_t flags)
{
    if (size == 0) {
        return NULL;
    }
    const uint8_t dma_flags = (MEM_REGION_FLAG_DMA_OK | MEM_REGION_FLAG_AXI_BUS | MEM_REGION_FLAG_ETH_SAFE);
    size_t alignment = (flags & dma_flags) ? DMA_ALIGNMENT : MIN_ALIGNMENT;
    if (flags & MEM_REGION_FLAG_ETH_SAFE) {
        // the alignment needs to be the same as the size
        alignment = size;
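        // (a size-aligned, power-of-two allocation matches the ARMv7-M
        // MPU region constraints; ethernet buffers are typically placed
        // in an MPU region marked non-cacheable, which is the likely
        // reason for this requirement)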
        // the size also needs to be a power of 2; if not, return NULL
        if ((size & (size-1)) != 0) {
            return NULL;
        }
    }
    void *p = NULL;
    uint8_t i;

    if (flags & dma_flags) {
        // allocate a multiple of the DMA alignment
        size = (size + (DMA_ALIGNMENT-1)) & ~(DMA_ALIGNMENT-1);
    }

    // if no flags are set, or this is a DMA request and the default
    // heap is DMA safe, then start with the default heap
    if (flags == 0 || (flags == MEM_REGION_FLAG_DMA_OK &&
                       (memory_regions[0].flags & MEM_REGION_FLAG_DMA_OK))) {
        p = chHeapAllocAligned(NULL, size, alignment);
        if (p) {
            goto found;
        }
    }

    // try with matching flags
    for (i=1; i<NUM_MEMORY_REGIONS; i++) {
        if ((flags & MEM_REGION_FLAG_DMA_OK) &&
            !(memory_regions[i].flags & MEM_REGION_FLAG_DMA_OK)) {
            continue;
        }
        if ((flags & MEM_REGION_FLAG_AXI_BUS) &&
            !(memory_regions[i].flags & MEM_REGION_FLAG_AXI_BUS)) {
            continue;
        }
        if ((flags & MEM_REGION_FLAG_FAST) &&
            !(memory_regions[i].flags & MEM_REGION_FLAG_FAST)) {
            continue;
        }
        if ((flags & MEM_REGION_FLAG_ETH_SAFE) &&
            !(memory_regions[i].flags & MEM_REGION_FLAG_ETH_SAFE)) {
            continue;
        }
        p = chHeapAllocAligned(&heaps[i], size, alignment);
        if (p) {
            goto found;
        }
    }

    // if this is not a DMA request then we can fall back to any heap
    if (!(flags & dma_flags)) {
        for (i=1; i<NUM_MEMORY_REGIONS; i++) {
            p = chHeapAllocAligned(&heaps[i], size, alignment);
            if (p) {
                goto found;
            }
        }
        // try the default heap
        p = chHeapAllocAligned(NULL, size, alignment);
        if (p) {
            goto found;
        }
    }

#if DMA_RESERVE_SIZE != 0
    // fall back to the DMA reserve
    p = chHeapAllocAligned(&dma_reserve_heap, size, alignment);
    if (p) {
        memset(p, 0, size);
        return p;
    }
#endif

    // failed
    return NULL;

found:
    memset(p, 0, size);
    return p;
}

#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
/*
  memory guard system. We put all allocated memory in a doubly linked
  list and add canary bytes at the front and back of all
  allocations. On all free operations, plus on calls to malloc_check(),
  we walk the list and check for memory corruption, flagging an
  internal error if one is found
 */
struct memguard {
    uint32_t size;
    uint32_t inv_size;
    struct memguard *next, *prev;
    uint32_t pad[4]; // pad to 32 bytes
};
static struct memguard *mg_head;

#define MALLOC_HEAD_SIZE sizeof(struct memguard)
#define MALLOC_GUARD_SIZE DMA_ALIGNMENT
#define MALLOC_GUARD1_START 73
#define MALLOC_GUARD2_START 172
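
/*
  layout of a guarded allocation, as built by malloc_flags_guard():

    [ header | guard1 | user data | guard2 ]

  header:        MALLOC_HEAD_SIZE bytes (the struct memguard)
  guard1/guard2: MALLOC_GUARD_SIZE bytes of canary values
  user data:     the rounded-up requested size; this is what the
                 caller's pointer points at
 */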

/*
  optional malloc guard regions
 */
static void *malloc_flags_guard(size_t size, uint32_t flags)
{
    chMtxLock(&mem_mutex);

    if (flags & (MEM_REGION_FLAG_DMA_OK | MEM_REGION_FLAG_AXI_BUS)) {
        size = (size + (DMA_ALIGNMENT-1U)) & ~(DMA_ALIGNMENT-1U);
    } else {
        size = (size + (MIN_ALIGNMENT-1U)) & ~(MIN_ALIGNMENT-1U);
    }
    void *ret = malloc_flags(size+MALLOC_GUARD_SIZE*2+MALLOC_HEAD_SIZE, flags);
    if (!ret) {
        chMtxUnlock(&mem_mutex);
        return NULL;
    }
    struct memguard *mg = (struct memguard *)ret;
    uint8_t *b1 = (uint8_t *)&mg[1];
    uint8_t *b2 = b1 + MALLOC_GUARD_SIZE + size;
    mg->size = size;
    mg->inv_size = ~size;
    for (uint32_t i=0; i<MALLOC_GUARD_SIZE; i++) {
        b1[i] = (uint8_t)(MALLOC_GUARD1_START + i);
        b2[i] = (uint8_t)(MALLOC_GUARD2_START + i);
    }

    if (mg_head != NULL) {
        mg->next = mg_head;
        mg_head->prev = mg;
    }
    mg_head = mg;

    chMtxUnlock(&mem_mutex);
    return (void *)(b1+MALLOC_GUARD_SIZE);
}

extern void AP_memory_guard_error(uint32_t size);

/*
  check a single allocation for corruption using its guard bytes
 */
void malloc_check_mg(const struct memguard *mg)
{
    if (mg->size != ~mg->inv_size) {
        AP_memory_guard_error(0);
        return;
    }
    const uint32_t size = mg->size;
    const uint8_t *b1 = (const uint8_t *)&mg[1];
    const uint8_t *b2 = b1 + MALLOC_GUARD_SIZE + size;
    for (uint32_t i=0; i<MALLOC_GUARD_SIZE; i++) {
        if (b1[i] != (uint8_t)(MALLOC_GUARD1_START + i) ||
            b2[i] != (uint8_t)(MALLOC_GUARD2_START + i)) {
            AP_memory_guard_error(size);
            return;
        }
    }
}

/*
  check for errors across the entire allocation list
 */
void malloc_check_all(void)
{
    for (struct memguard *mg=mg_head; mg; mg=mg->next) {
        malloc_check_mg(mg);
    }
}

/*
  check a pointer for guard errors; with a NULL pointer, check every
  allocation
 */
void malloc_check(const void *p)
{
    if (p == NULL) {
        // allow for malloc_check(nullptr) to check all allocated memory
        chMtxLock(&mem_mutex);
        malloc_check_all();
        chMtxUnlock(&mem_mutex);
        return;
    }
    if (((uintptr_t)p) & 3) {
        // misaligned memory
        AP_memory_guard_error(0);
        return;
    }
    chMtxLock(&mem_mutex);
    struct memguard *mg = (struct memguard *)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE));
    malloc_check_mg(mg);
    malloc_check_all();
    chMtxUnlock(&mem_mutex);
}

static void free_guard(void *p)
{
    chMtxLock(&mem_mutex);
    malloc_check(p);
    struct memguard *mg = (struct memguard *)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE));
    if (mg->next) {
        mg->next->prev = mg->prev;
    }
    if (mg->prev) {
        mg->prev->next = mg->next;
    }
    if (mg == mg_head) {
        mg_head = mg->next;
    }
    chHeapFree((void*)(((uint8_t *)p) - (MALLOC_GUARD_SIZE+MALLOC_HEAD_SIZE)));
    chMtxUnlock(&mem_mutex);
}

#define malloc_flags(size, flags) malloc_flags_guard(size, flags)

#else // HAL_CHIBIOS_ENABLE_MALLOC_GUARD

void malloc_check(const void *p)
{
    (void)p;
}
#endif // HAL_CHIBIOS_ENABLE_MALLOC_GUARD

/*
  allocate normal memory
 */
void *malloc(size_t size)
{
    return malloc_flags(size, 0);
}

/*
  allocate DMA-safe memory
 */
void *malloc_dma(size_t size)
{
    return malloc_flags(size, MEM_REGION_FLAG_DMA_OK);
}
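
/*
  usage sketch (illustrative only): a peripheral driver needing a buffer
  that a DMA controller can access would call

      uint8_t *buf = (uint8_t *)malloc_dma(128);

  instead of plain malloc(), getting zero-filled, DMA_ALIGNMENT-aligned
  memory from a DMA-capable region
 */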

/*
  allocate from memory connected to the AXI bus if available,
  otherwise just allocate DMA-safe memory
 */
void *malloc_axi_sram(size_t size)
{
#if defined(STM32H7)
    return malloc_flags(size, MEM_REGION_FLAG_AXI_BUS);
#else
    return malloc_flags(size, MEM_REGION_FLAG_DMA_OK);
#endif
}

/*
  allocate memory for ethernet DMA
 */
void *malloc_eth_safe(size_t size)
{
#if defined(STM32H7)
    return malloc_flags(size, MEM_REGION_FLAG_ETH_SAFE);
#else
    (void)size;
    return NULL;
#endif
}

/*
  allocate fast memory
 */
void *malloc_fastmem(size_t size)
{
    return malloc_flags(size, MEM_REGION_FLAG_FAST);
}

void *calloc(size_t nmemb, size_t size)
{
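    // malloc_flags() already zero-fills all allocations, so calloc only
    // needs the size multiply; note that nmemb*size is not checked for
    // overflow here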
    return malloc(nmemb * size);
}

void free(void *ptr)
{
    if (ptr != NULL) {
#ifdef HAL_CHIBIOS_ENABLE_MALLOC_GUARD
        free_guard(ptr);
#else
        chHeapFree(ptr);
#endif
    }
}

/*
  return total available memory in bytes
 */
size_t mem_available(void)
{
    size_t totalp = 0;
    uint8_t i;

    // get memory available on the main heap
    chHeapStatus(NULL, &totalp, NULL);

    // we also need to add in memory that is not yet allocated to the heap
    totalp += chCoreGetStatusX();

    // now our own heaps
    for (i=1; i<NUM_MEMORY_REGIONS; i++) {
        size_t available = 0;
        chHeapStatus(&heaps[i], &available, NULL);
        totalp += available;
    }

#if DMA_RESERVE_SIZE != 0
    // and the DMA reserve heap
    size_t available = 0;
    chHeapStatus(&dma_reserve_heap, &available, NULL);
    totalp += available;
#endif

    return totalp;
}

#if CH_CFG_USE_DYNAMIC == TRUE
/*
  allocate a thread on any available heap
 */
thread_t *thread_create_alloc(size_t size,
                              const char *name, tprio_t prio,
                              tfunc_t pf, void *arg)
{
    thread_t *ret;
    // first try the default heap
    ret = chThdCreateFromHeap(NULL, size, name, prio, pf, arg);
    if (ret != NULL) {
        return ret;
    }

    // now try the other heaps
    uint8_t i;
    for (i=1; i<NUM_MEMORY_REGIONS; i++) {
        ret = chThdCreateFromHeap(&heaps[i], size, name, prio, pf, arg);
        if (ret != NULL) {
            return ret;
        }
    }
    return NULL;
}
#endif

/*
  return heap information
 */
uint8_t malloc_get_heaps(memory_heap_t **_heaps, const struct memory_region **regions)
{
    *_heaps = &heaps[0];
    *regions = &memory_regions[0];
    return NUM_MEMORY_REGIONS;
}

#endif // CH_CFG_USE_HEAP

/*
  flush all memory. Used in chSysHalt()
 */
void memory_flush_all(void)
{
    uint8_t i;
    for (i=0; i<NUM_MEMORY_REGIONS; i++) {
        stm32_cacheBufferFlush(memory_regions[i].address, memory_regions[i].size);
    }
}

/*
  replacement for strdup
 */
char *strdup(const char *str)
{
    const size_t len = strlen(str);
    char *ret = malloc(len+1);
    if (!ret) {
        return NULL;
    }
    memcpy(ret, str, len);
    ret[len] = 0;
    return ret;
}

/*
  return true if the address lies within one of our memory regions
 */
bool is_address_in_memory(void *addr)
{
    uint8_t i;
    for (i=0; i<NUM_MEMORY_REGIONS; i++) {
        if (addr >= memory_regions[i].address &&
            addr < (memory_regions[i].address + memory_regions[i].size)) {
            return true;
        }
    }
    return false;
}

/*
  return the start of the memory region that contains the address,
  or NULL if the address is in no region
 */
void* get_addr_mem_region_start_addr(void *addr)
{
    uint8_t i;
    for (i=0; i<NUM_MEMORY_REGIONS; i++) {
        if (addr >= memory_regions[i].address &&
            addr < (memory_regions[i].address + memory_regions[i].size)) {
            return memory_regions[i].address;
        }
    }
    return NULL;
}

/*
  return the end of the memory region that contains the address,
  or NULL if the address is in no region
 */
void* get_addr_mem_region_end_addr(void *addr)
{
    uint8_t i;
    for (i=0; i<NUM_MEMORY_REGIONS; i++) {
        if (addr >= memory_regions[i].address &&
            addr < (memory_regions[i].address + memory_regions[i].size)) {
            return memory_regions[i].address + memory_regions[i].size;
        }
    }
    return NULL;
}

/*
  allocation functions for newlib
 */
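/*
  these are hooked in via the GNU linker's --wrap option: building with
  -Wl,--wrap=_malloc_r redirects every call to _malloc_r() into
  __wrap__malloc_r(), and likewise for the other wrapped symbols (the
  wrap list itself is expected to be set up by the build system)
 */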
void *__wrap__calloc_r(void *rptr, size_t nmemb, size_t size)
{
    (void)rptr;
    return calloc(nmemb, size);
}

void *__wrap__malloc_r(void *rptr, size_t size)
{
    (void)rptr;
    // we want consistently zeroed memory
    return calloc(1, size);
}

void __wrap__free_r(void *rptr, void *ptr)
{
    (void)rptr;
    free(ptr);
}

#ifdef USE_POSIX
/*
  allocation functions for FATFS
 */
void *ff_memalloc(unsigned msize)
{
    if (msize > 4096) {
        // refuse large sizes. FATFS tries for 32k blocks for creating
        // directories, which ends up trying to allocate 64k with the
        // DMA bouncebuffer, and this can cause filesystem operation
        // failures. We want FATFS to limit itself to 4k blocks, which
        // it does when the allocation of the larger size fails
        return NULL;
    }
    // try to get DMA-capable memory, which results in less copying and
    // so faster access
    void *ret = malloc_axi_sram(msize);
    if (ret != NULL) {
        return ret;
    }
    // fall back to any memory, which means we will use the
    // preallocated bouncebuffer on systems where general purpose
    // memory cannot be used for microSD access
    return malloc(msize);
}

void ff_memfree(void* mblock)
{
    free(mblock);
}
#endif // USE_POSIX

/*
  return true if a memory region is safe for a DMA operation
 */
bool mem_is_dma_safe(const void *addr, uint32_t size, bool filesystem_op)
{
    (void)filesystem_op;
#if defined(STM32F1)
    // F1 is always OK
    (void)addr;
    (void)size;
    return true;
#else
    uint32_t flags = MEM_REGION_FLAG_DMA_OK;
#if defined(STM32H7)
    if (!filesystem_op) {
        // use the bouncebuffer for all non-filesystem ops on H7
        return false;
    }
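    // cache maintenance on the H7 operates on 32-byte cache lines, so
    // both the address and the length must be multiples of 32 to be
    // DMA safe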
    if ((((uint32_t)addr) & 0x1F) != 0 || (size & 0x1F) != 0) {
        return false;
    }
    if (filesystem_op) {
        flags = MEM_REGION_FLAG_AXI_BUS;
    }
#elif defined(STM32F4)
    if (((uint32_t)addr) & 0x01) {
        return false;
    }
#else
    if (((uint32_t)addr) & 0x07) {
        return false;
    }
#endif
    for (uint8_t i=0; i<NUM_MEMORY_REGIONS; i++) {
        if (memory_regions[i].flags & flags) {
            if ((uint32_t)addr >= (uint32_t)memory_regions[i].address &&
                ((uint32_t)addr + size) <= ((uint32_t)memory_regions[i].address + memory_regions[i].size)) {
                return true;
            }
        }
    }
    return false;
#endif // STM32F1
}