Remove uavcannode_gps_demo since it's superseded by uavcan_v1

Peter van der Perk 2021-11-30 15:52:53 +01:00 committed by Daniel Agar
parent ce909b23b1
commit ce10dd90e7
18 changed files with 0 additions and 2275 deletions

.gitmodules

@@ -42,12 +42,6 @@
 [submodule "src/drivers/uavcan_v1/public_regulated_data_types"]
 path = src/drivers/uavcan_v1/public_regulated_data_types
 url = https://github.com/UAVCAN/public_regulated_data_types.git
-[submodule "src/drivers/uavcannode_gps_demo/public_regulated_data_types"]
-path = src/drivers/uavcannode_gps_demo/public_regulated_data_types
-url = https://github.com/UAVCAN/public_regulated_data_types.git
-[submodule "src/drivers/uavcannode_gps_demo/libcanard"]
-path = src/drivers/uavcannode_gps_demo/libcanard
-url = https://github.com/UAVCAN/libcanard.git
 [submodule "src/drivers/uavcan_v1/legacy_data_types"]
 path = src/drivers/uavcan_v1/legacy_data_types
 url = https://github.com/PX4/public_regulated_data_types.git


@@ -1,83 +0,0 @@
############################################################################
#
# Copyright (c) 2020 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
set(LIBCANARD_DIR ${CMAKE_CURRENT_SOURCE_DIR}/libcanard)
set(DSDL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/public_regulated_data_types)
px4_add_git_submodule(TARGET git_libcanard PATH ${LIBCANARD_DIR})
px4_add_git_submodule(TARGET git_public_regulated_data_types PATH ${DSDL_DIR})
find_program(NNVG_PATH nnvg)
if(NNVG_PATH)
execute_process(COMMAND ${NNVG_PATH} --outdir ${CMAKE_CURRENT_BINARY_DIR}/dsdlc_generated --target-language c -I ${DSDL_DIR}/uavcan ${DSDL_DIR}/reg)
execute_process(COMMAND ${NNVG_PATH} --outdir ${CMAKE_CURRENT_BINARY_DIR}/dsdlc_generated --target-language c ${DSDL_DIR}/uavcan)
else()
message(FATAL_ERROR "UAVCAN Nunavut nnvg not found")
endif()
add_definitions(
-DCONFIG_EXAMPLES_LIBCANARDV1_DEV="can0"
-DCONFIG_EXAMPLES_LIBCANARDV1_NODE_MEM_POOL_SIZE=8192
-DCONFIG_EXAMPLES_LIBCANARDV1_DAEMON_STACK_SIZE=5000
-DCONFIG_EXAMPLES_LIBCANARDV1_DAEMON_PRIORITY=100
-DCANARD_DSDL_CONFIG_LITTLE_ENDIAN=1
)
px4_add_module(
MODULE drivers__uavcannode-gps-demo
MAIN uavcannode_gps_demo
COMPILE_FLAGS
-Wno-error
-DUINT32_C\(x\)=__UINT32_C\(x\)
INCLUDES
${LIBCANARD_DIR}/libcanard/
${CMAKE_CURRENT_BINARY_DIR}/dsdlc_generated
SRCS
canard_main.c
socketcan.c
socketcan.h
uorb_converter.c
uorb_converter.h
o1heap.c
o1heap.h
libcancl/pnp.c
libcancl/registerinterface.c
${LIBCANARD_DIR}/libcanard/canard.c
${LIBCANARD_DIR}/libcanard/canard.h
DEPENDS
git_libcanard
git_public_regulated_data_types
drivers_bootloaders
version
)


@@ -1,5 +0,0 @@
menuconfig DRIVERS_UAVCANNODE_GPS_DEMO
bool "uavcannode_gps_demo"
default n
---help---
Enable support for uavcannode_gps_demo
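Boards that carried the demo enabled it through this option in their defconfig, presumably as CONFIG_DRIVERS_UAVCANNODE_GPS_DEMO=y following the usual Kconfig symbol naming; the board configuration files themselves are not shown here.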


@@ -1,469 +0,0 @@
/****************************************************************************
* examples/canard/canard_main.c
*
* Copyright (C) 2016 ETH Zuerich. All rights reserved.
* Author: Matthias Renner <rennerm@ethz.ch>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <canard.h>
#include <canard_dsdl.h>
#include <sched.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <poll.h>
#include <nuttx/can.h>
#include <netpacket/can.h>
#include "socketcan.h"
#include "o1heap.h"
#include "uorb_converter.h"
#include "libcancl/pnp.h"
#include "libcancl/registerinterface.h"
#include "boot_app_shared.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Arena for memory allocation, used by the library */
#define O1_HEAP_SIZE CONFIG_EXAMPLES_LIBCANARDV1_NODE_MEM_POOL_SIZE
#define UNIQUE_ID_LENGTH_BYTES 16
/****************************************************************************
* Private Data
****************************************************************************/
/*
 * This is the AppImageDescriptor used by the make_can_boot_descriptor.py tool
 * to set the application image's descriptor, so that the UAVCAN bootloader
 * can validate the image CRC, size, etc. of this application.
 */
boot_app_shared_section app_descriptor_t AppDescriptor = {
.signature = APP_DESCRIPTOR_SIGNATURE,
.image_crc = 0,
.image_size = 0,
.git_hash = 0,
.major_version = APP_VERSION_MAJOR,
.minor_version = APP_VERSION_MINOR,
.board_id = HW_VERSION_MAJOR << 8 | HW_VERSION_MINOR,
.reserved = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
};
O1HeapInstance *my_allocator;
static uint8_t uavcan_heap[O1_HEAP_SIZE]
__attribute__((aligned(O1HEAP_ALIGNMENT)));
static uint8_t unique_id[UNIQUE_ID_LENGTH_BYTES] = { //FIXME real HW ID
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01
};
/* Node status variables */
static bool g_canard_daemon_started;
static int16_t gps_uorb_port_id = -1;
static int16_t gps_fix_port_id = -1;
static int16_t gps_aux_port_id = -1;
struct pollfd fd;
/****************************************************************************
* private functions
****************************************************************************/
//TODO move this to a separate file, probably
int32_t set_gps_uorb_port_id(uavcan_register_Value_1_0 *value)
{
if (uavcan_register_Value_1_0_is_natural16_(value) && value->natural16.value.count == 1) { // Natural 16
//TODO check validity
printf("Master: set uORB portID to %i\n", value->natural16.value.elements[0]);
gps_uorb_port_id = value->natural16.value.elements[0];
return 0;
}
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
uavcan_register_Value_1_0 get_gps_uorb_port_id()
{
void *dataReturn;
uavcan_register_Value_1_0 value;
value.natural16.value.elements[0] = gps_uorb_port_id;
value.natural16.value.count = 1;
value._tag_ = 10; // TODO does nunavut generate ENUM/defines for this??
return value;
}
int32_t set_gps_fix_port_id(uavcan_register_Value_1_0 *value)
{
if (uavcan_register_Value_1_0_is_natural16_(value) && value->natural16.value.count == 1) { // Natural 16
//TODO check validity
printf("Master: set FIX portID to %i\n", value->natural16.value.elements[0]);
gps_fix_port_id = value->natural16.value.elements[0];
return 0;
}
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
uavcan_register_Value_1_0 get_gps_fix_port_id()
{
void *dataReturn;
uavcan_register_Value_1_0 value;
value.natural16.value.elements[0] = gps_fix_port_id;
value.natural16.value.count = 1;
value._tag_ = 10; // TODO does nunavut generate ENUM/defines for this??
return value;
}
int32_t set_gps_aux_port_id(uavcan_register_Value_1_0 *value)
{
if (uavcan_register_Value_1_0_is_natural16_(value) && value->natural16.value.count == 1) { // Natural 16
//TODO check validity
printf("Master: set AUX portID to %i\n", value->natural16.value.elements[0]);
gps_aux_port_id = value->natural16.value.elements[0];
return 0;
}
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
uavcan_register_Value_1_0 get_gps_aux_port_id()
{
void *dataReturn;
uavcan_register_Value_1_0 value;
value.natural16.value.elements[0] = gps_aux_port_id;
value.natural16.value.count = 1;
value._tag_ = 10; // TODO does nunavut generate ENUM/defines for this??
return value;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: memAllocate
*
* Description:
*
****************************************************************************/
static void *memAllocate(CanardInstance *const ins, const size_t amount)
{
(void) ins;
return o1heapAllocate(my_allocator, amount);
}
/****************************************************************************
* Name: memFree
*
* Description:
*
****************************************************************************/
static void memFree(CanardInstance *const ins, void *const pointer)
{
(void) ins;
o1heapFree(my_allocator, pointer);
}
/****************************************************************************
* Name: getMonotonicTimestampUSec
*
* Description:
*
****************************************************************************/
uint64_t getMonotonicTimestampUSec(void)
{
int ret;
struct timespec ts;
memset(&ts, 0, sizeof(ts));
ret = clock_gettime(CLOCK_MONOTONIC, &ts);
if (ret != 0) {
PX4_ERR("clock error %i", ret);
abort();
}
return ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000ULL;
}
/****************************************************************************
* Name: processTxRxOnce
*
* Description:
* Transmits all frames from the TX queue, receives up to one frame.
*
****************************************************************************/
void processTxRxOnce(CanardInstance *ins, CanardSocketInstance *sock_ins, int timeout_msec)
{
int32_t result;
/* Transmitting */
for (const CanardFrame *txf = NULL; (txf = canardTxPeek(ins)) != NULL;) { // Look at the top of the TX queue.
if (txf->timestamp_usec > getMonotonicTimestampUSec()) { // Proceed only if the transmission deadline has not expired yet.
if (socketcanTransmit(sock_ins, txf) == 0) { // Send the frame. Redundant interfaces may be used here.
break; // If the driver is busy, break and retry later.
}
} else {
printf("Timeout??\n");
}
canardTxPop(ins); // Remove the frame from the queue after it's transmitted.
ins->memory_free(ins, (CanardFrame *)txf); // Deallocate the dynamic memory afterwards.
}
/* Poll receive */
if (poll(&fd, 1, timeout_msec) <= 0) {
return;
}
/* Receiving */
CanardFrame received_frame;
socketcanReceive(sock_ins, &received_frame);
CanardTransfer receive;
result = canardRxAccept(ins,
&received_frame, // The CAN frame received from the bus.
0, // If the transport is not redundant, use 0.
&receive);
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory will never occur for a given application if
// the heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
// Reception of an invalid frame is NOT an error.
fprintf(stderr, "Receive error %" PRId32 "\n", result);
} else if (result == 1) {
printf("Receive portId %i\n", receive.port_id);
if (receive.port_id == PNPGetPortID(ins)) {
PNPProcess(ins, &receive);
} else {
uavcan_register_interface_process(ins, &receive);
}
ins->memory_free(ins, (void *)receive.payload); // Deallocate the dynamic memory afterwards.
} else {
// printf("RX canard %d\r\n", result);
// Nothing to do.
// The received frame is either invalid or it's a non-last frame of a multi-frame transfer.
// Reception of an invalid frame is NOT reported as an error because it is not an error.
}
}
/****************************************************************************
* Name: canard_daemon
*
* Description:
*
****************************************************************************/
static int canard_daemon(int argc, char *argv[])
{
int errval = 0;
int can_fd = 0;
if (argc > 2) {
for (int args = 2; args < argc; args++) {
if (!strcmp(argv[args], "canfd")) {
can_fd = 1;
}
}
}
my_allocator = o1heapInit(&uavcan_heap, O1_HEAP_SIZE, NULL, NULL);
if (my_allocator == NULL) {
printf("o1heapInit failed with size %d\n", O1_HEAP_SIZE);
errval = 2;
goto errout_with_dev;
}
CanardInstance ins = canardInit(&memAllocate, &memFree);
if (can_fd) {
ins.mtu_bytes = CANARD_MTU_CAN_FD;
} else {
ins.mtu_bytes = CANARD_MTU_CAN_CLASSIC;
}
/* Open the CAN device for reading */
CanardSocketInstance sock_ins;
socketcanOpen(&sock_ins, CONFIG_EXAMPLES_LIBCANARDV1_DEV, can_fd);
/* setup poll fd */
fd.fd = sock_ins.s;
fd.events = POLLIN;
if (sock_ins.s < 0) {
printf("canard_daemon: ERROR: open %s failed: %d\n",
CONFIG_EXAMPLES_LIBCANARDV1_DEV, errno);
errval = 2;
goto errout_with_dev;
}
/* libcancl functions */
/* Dynamic NodeId */
/* Init UAVCAN register interfaces */
uavcan_node_GetInfo_Response_1_0 node_information; // TODO ADD INFO
uavcan_register_interface_init(&ins, &node_information);
uavcan_register_interface_add_entry("uorb.sensor_gps.0", set_gps_uorb_port_id, get_gps_uorb_port_id);
uavcan_register_interface_add_entry("gnss_fix", set_gps_fix_port_id, get_gps_fix_port_id);
uavcan_register_interface_add_entry("gnss_aux", set_gps_aux_port_id, get_gps_aux_port_id);
initPNPAllocatee(&ins, unique_id);
uint32_t random_no;
random_no = ((float)rand() / RAND_MAX) * (1000000);
uint64_t next_alloc_req = getMonotonicTimestampUSec() + random_no;
while (ins.node_id == CANARD_NODE_ID_UNSET) {
// process the TX and RX buffer
processTxRxOnce(&ins, &sock_ins, 10); //10Ms
const uint64_t ts = getMonotonicTimestampUSec();
if (ts >= next_alloc_req) {
next_alloc_req += ((float)rand() / RAND_MAX) * (1000000);
int32_t result = PNPAllocRequest(&ins);
if (result) {
ins.node_id = PNPGetNodeID();
}
}
}
printf("canard_daemon: canard initialized\n");
printf("start node (ID: %d MTU: %d)\n", ins.node_id,
ins.mtu_bytes);
/* Initialize uORB publishers & subscribers */
uorbConverterInit(&ins, &gps_uorb_port_id, &gps_fix_port_id, &gps_aux_port_id);
g_canard_daemon_started = true;
uint64_t next_1hz_service_at = getMonotonicTimestampUSec();
for (;;) {
processTxRxOnce(&ins, &sock_ins, 10);
uorbProcessSub(10);
}
errout_with_dev:
g_canard_daemon_started = false;
printf("canard_daemon: Terminating!\n");
fflush(stdout);
return errval;
}
/****************************************************************************
* Name: canard_main
*
* Description:
*
****************************************************************************/
int uavcannode_gps_demo_main(int argc, FAR char *argv[])
{
int ret;
printf("canard_main: Starting canard_daemon\n");
if (g_canard_daemon_started) {
printf("canard_main: receive and send task already running\n");
return EXIT_SUCCESS;
}
ret = task_create("canard_daemon", CONFIG_EXAMPLES_LIBCANARDV1_DAEMON_PRIORITY,
CONFIG_EXAMPLES_LIBCANARDV1_DAEMON_STACK_SIZE, canard_daemon,
argv);
if (ret < 0) {
int errcode = errno;
printf("canard_main: ERROR: Failed to start canard_daemon: %d\n",
errcode);
return EXIT_FAILURE;
}
printf("canard_main: canard_daemon started\n");
return EXIT_SUCCESS;
}

@@ -1 +0,0 @@
Subproject commit 2a116170285fb47fcaae150ad21c2ccde0756a5f


@@ -1,12 +0,0 @@
# libcancl
Libcanard client library that implements common client functionality for use with UAVCAN/CAN.
Current features:
- UAVCAN PNP client
- UAVCAN Register interface
- UAVCAN GetInfo responder
# TODOs
- UAVCAN ExecuteCommand implementation
- UAVCAN diagnostic abstraction/helper
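For reference, the deleted canard_main.c shown earlier in this diff wired these pieces together roughly as follows; this is a condensed sketch of that code (PNP request-timer randomization and error handling omitted), not a standalone example:

CanardInstance ins = canardInit(&memAllocate, &memFree);
ins.mtu_bytes = can_fd ? CANARD_MTU_CAN_FD : CANARD_MTU_CAN_CLASSIC;

/* Register interface: expose port-ID registers backed by set/get callbacks. */
uavcan_node_GetInfo_Response_1_0 node_information;
uavcan_register_interface_init(&ins, &node_information);
uavcan_register_interface_add_entry("uorb.sensor_gps.0", set_gps_uorb_port_id, get_gps_uorb_port_id);

/* Plug-and-play allocation: keep requesting until a node-ID is granted. */
initPNPAllocatee(&ins, unique_id);
while (ins.node_id == CANARD_NODE_ID_UNSET) {
	processTxRxOnce(&ins, &sock_ins, 10);
	if (PNPAllocRequest(&ins)) {
		ins.node_id = PNPGetNodeID();
	}
}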


@@ -1,264 +0,0 @@
/****************************************************************************
* nxp_bms/BMS_v1/src/UAVCAN/pnp.c
*
* BSD 3-Clause License
*
* Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#include "pnp.h"
// Use NuttX crc64 function TODO fallback header for other platforms
#include <crc64.h>
#include "uavcan/node/ID_1_0.h"
#include "uavcan/pnp/NodeIDAllocationData_1_0.h"
#include "uavcan/pnp/NodeIDAllocationData_2_0.h"
#define PNP1_PORT_ID uavcan_pnp_NodeIDAllocationData_1_0_FIXED_PORT_ID_
#define PNP1_PAYLOAD_SIZE uavcan_pnp_NodeIDAllocationData_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_
#define PNP2_PORT_ID uavcan_pnp_NodeIDAllocationData_2_0_FIXED_PORT_ID_
#define PNP2_PAYLOAD_SIZE uavcan_pnp_NodeIDAllocationData_2_0_SERIALIZATION_BUFFER_SIZE_BYTES_
#define PNP_UNIQUE_ID_SIZE 16 // 128 bit unique id
#define PNP_NODE_ID_NO_PREFERENCE (CANARD_NODE_ID_MAX - 2) // No preference -> highest possible node ID; note: the two highest node-ID values are reserved for network maintenance tools
/****************************************************************************
* private data
****************************************************************************/
CanardRxSubscription allocSub;
// 128 bit unique id
uint8_t local_unique_id[PNP_UNIQUE_ID_SIZE];
const CanardNodeID preferred_node_id = PNP_NODE_ID_NO_PREFERENCE;
CanardNodeID node_id = CANARD_NODE_ID_UNSET;
CanardTransferID node_id_alloc_transfer_id = 0;
/****************************************************************************
* private function declarations
****************************************************************************/
uint64_t makePseudoUniqueId(uint8_t *pUniqueID)
{
// NuttX CRC64/WE implementation
return crc64(pUniqueID, PNP_UNIQUE_ID_SIZE);
}
/****************************************************************************
* public functions
****************************************************************************/
/* Rule A. On initialization:
* 1. The allocatee subscribes to this message.
* 2. The allocatee starts the Request Timer with a random interval [0, 1) sec of Trequest.
*/
uint32_t initPNPAllocatee(CanardInstance *ins, uint8_t *unique_id)
{
// Store unique_id locally
memcpy(&local_unique_id[0], &unique_id[0], sizeof(local_unique_id));
// Create RX Subscriber so we can listen to NodeIDAllocationData msgs
(void) canardRxSubscribe(ins, // Subscribe to NodeIDAllocationData messages.
CanardTransferKindMessage,
(ins->mtu_bytes == CANARD_MTU_CAN_FD ? PNP2_PORT_ID : PNP1_PORT_ID), // The fixed Subject-ID
(ins->mtu_bytes == CANARD_MTU_CAN_FD ? PNP2_PAYLOAD_SIZE : PNP1_PAYLOAD_SIZE),
CANARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC,
&allocSub);
// Caller should start a timer with a random interval between 0 and 1 sec
return 0;
}
/* Rule B. On expiration of the Request Timer (started as per rules A, B, or C):
* 1. Request Timer restarts with a random interval [0, 1) sec of Trequest (chosen anew).
* 2. The allocatee broadcasts an allocation request message, where the fields are populated as follows:
* unique_id_hash - a 48-bit hash of the unique-ID of the allocatee.
* allocated_node_id - empty (not populated).
*/
int32_t PNPAllocRequest(CanardInstance *ins)
{
int32_t result;
CanardMicrosecond transmission_deadline = getMonotonicTimestampUSec() + 1000 * 10;
// Caller should restart the timer
// Allocation already done, nothing to do TODO maybe stop subscribing
if (node_id != CANARD_NODE_ID_UNSET) {
return 1;
}
if (ins->mtu_bytes == CANARD_MTU_CAN_FD) {
// NodeIDAllocationData message
uavcan_pnp_NodeIDAllocationData_2_0 node_id_alloc_msg;
uint8_t node_id_alloc_payload_buffer[PNP2_PAYLOAD_SIZE];
memcpy(node_id_alloc_msg.unique_id, local_unique_id, sizeof(node_id_alloc_msg.unique_id));
node_id_alloc_msg.node_id.value = preferred_node_id;
CanardTransfer transfer = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindMessage,
.port_id = PNP2_PORT_ID, // This is the subject-ID.
.remote_node_id = CANARD_NODE_ID_UNSET, // Messages cannot be unicast, so use UNSET.
.transfer_id = node_id_alloc_transfer_id,
.payload_size = PNP2_PAYLOAD_SIZE,
.payload = &node_id_alloc_payload_buffer,
};
result = uavcan_pnp_NodeIDAllocationData_2_0_serialize_(&node_id_alloc_msg, &node_id_alloc_payload_buffer,
&transfer.payload_size);
if (result == 0) {
// set the data ready in the buffer and chop if needed
++node_id_alloc_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
result = canardTxPush(ins, &transfer);
}
} else {
// NodeIDAllocationData message
uavcan_pnp_NodeIDAllocationData_1_0 node_id_alloc_msg;
uavcan_pnp_NodeIDAllocationData_1_0_initialize_(&node_id_alloc_msg);
uint8_t node_id_alloc_payload_buffer[PNP1_PAYLOAD_SIZE];
node_id_alloc_msg.unique_id_hash = (makePseudoUniqueId(local_unique_id) & 0xFFFFFFFFFFFF);
CanardTransfer transfer = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindMessage,
.port_id = PNP1_PORT_ID, // This is the subject-ID.
.remote_node_id = CANARD_NODE_ID_UNSET, // Messages cannot be unicast, so use UNSET.
.transfer_id = node_id_alloc_transfer_id,
.payload_size = PNP1_PAYLOAD_SIZE,
.payload = &node_id_alloc_payload_buffer,
};
result = uavcan_pnp_NodeIDAllocationData_1_0_serialize_(&node_id_alloc_msg, &node_id_alloc_payload_buffer,
&transfer.payload_size);
if (result == 0) {
// set the data ready in the buffer and chop if needed
++node_id_alloc_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
result = canardTxPush(ins, &transfer);
}
}
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory will never occur for a given application if the
// heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
return result;
}
return 0;
}
/* Rule C. On any allocation message, even if other rules also match:
* 1. Request Timer restarts with a random interval of Trequest (chosen anew).
*
* Rule D. On an allocation message WHERE (source node-ID is non-anonymous, i.e., regular allocation response)
* AND (the field unique_id_hash matches the allocatee's 48-bit unique-ID hash)
* AND (the field allocated_node_id is populated):
* 1. Request Timer stops.
* 2. The allocatee initializes its node-ID with the received value.
* 3. The allocatee terminates its subscription to allocation messages.
* 4. Exit.
*/
int32_t PNPProcess(CanardInstance *ins, CanardTransfer *transfer)
{
// Allocation already done, nothing to do
if (node_id != CANARD_NODE_ID_UNSET) {
return 1;
}
if (transfer->remote_node_id == CANARD_NODE_ID_UNSET) { // Another request, ignore.
return 0;
}
int32_t allocated = CANARD_NODE_ID_UNSET;
if (ins->mtu_bytes == CANARD_MTU_CAN_FD) {
uavcan_pnp_NodeIDAllocationData_2_0 msg;
size_t pnp_in_size_bits = transfer->payload_size;
uavcan_pnp_NodeIDAllocationData_2_0_deserialize_(&msg, transfer->payload, &pnp_in_size_bits);
if (memcmp(msg.unique_id, local_unique_id, sizeof(msg.unique_id)) == 0) {
allocated = msg.node_id.value;
}
} else {
uavcan_pnp_NodeIDAllocationData_1_0 msg;
size_t pnp_in_size_bits = transfer->payload_size;
uavcan_pnp_NodeIDAllocationData_1_0_deserialize_(&msg, transfer->payload, &pnp_in_size_bits);
if (msg.allocated_node_id.count > 0) {
if (msg.unique_id_hash == (makePseudoUniqueId(local_unique_id) & 0xFFFFFFFFFFFF)) {
allocated = msg.allocated_node_id.elements[0].value;
}
}
}
if (allocated == CANARD_NODE_ID_UNSET) {
return -1; // UID mismatch.
}
if (allocated <= 0 || allocated >= CANARD_NODE_ID_MAX)
// Allocated node-ID ignored because it exceeds max_node_id
{
return -1;
}
// Plug-and-play allocation done: the node-ID received from the allocation server is stored below.
node_id = allocated;
return 1;
}
CanardNodeID PNPGetNodeID()
{
return node_id;
}
CanardPortID PNPGetPortID(CanardInstance *ins)
{
return (ins->mtu_bytes == CANARD_MTU_CAN_FD ? PNP2_PORT_ID : PNP1_PORT_ID);
}


@@ -1,74 +0,0 @@
/****************************************************************************
* nxp_bms/BMS_v1/inc/UAVCAN/pnp.h
*
* BSD 3-Clause License
*
* Copyright 2020 NXP
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
** ###################################################################
** Filename : pnp.h
** Project : SmartBattery_RDDRONE_BMS772
** Processor : S32K144
** Version : 1.00
** Date : 2020-10-29
** Abstract :
** pnp module.
**
** ###################################################################*/
/*!
** @file pnp.h
**
** @version 01.00
**
** @brief
** pnp module. this module implements the UAVCAN plug and play protocol
**
*/
#ifndef UAVCAN_PNP_H_
#define UAVCAN_PNP_H_
#define NUNAVUT_ASSERT
#include <canard.h>
#include "time.h"
uint32_t initPNPAllocatee(CanardInstance *ins, uint8_t *unique_id);
int32_t PNPAllocRequest(CanardInstance *ins);
int32_t PNPProcess(CanardInstance *ins, CanardTransfer *transfer);
CanardNodeID PNPGetNodeID(void);
CanardPortID PNPGetPortID(CanardInstance *ins);
#endif //UAVCAN_PNP_H_


@@ -1,282 +0,0 @@
#include "registerinterface.h"
#include "uavcan/_register/Access_1_0.h"
#include "uavcan/_register/List_1_0.h"
#include "uavcan/_register/Name_1_0.h"
#include "uavcan/_register/Value_1_0.h"
#include <stdio.h>
#include <string.h>
/****************************************************************************
* private data
****************************************************************************/
uavcan_node_GetInfo_Response_1_0 *node_info;
CanardTransferID getinfo_response_transfer_id = 0;
CanardTransferID register_access_response_transfer_id = 0;
CanardTransferID register_list_response_transfer_id = 0;
CanardRxSubscription getinfo_subscription;
CanardRxSubscription register_access_subscription;
CanardRxSubscription register_list_subscription;
//TODO register list and data
uavcan_register_interface_entry register_list[UAVCAN_REGISTER_COUNT];
uint32_t register_list_size = 0;
/****************************************************************************
* public functions
****************************************************************************/
int32_t uavcan_register_interface_init(CanardInstance *ins, uavcan_node_GetInfo_Response_1_0 *info)
{
node_info = info; //TODO think about retention, copy instead?
(void) canardRxSubscribe(ins,
CanardTransferKindRequest,
uavcan_node_GetInfo_1_0_FIXED_PORT_ID_,
uavcan_node_GetInfo_Request_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_,
CANARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC,
&getinfo_subscription);
(void) canardRxSubscribe(ins,
CanardTransferKindRequest,
uavcan_register_Access_1_0_FIXED_PORT_ID_,
uavcan_register_Access_Request_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_,
CANARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC,
&register_access_subscription);
(void) canardRxSubscribe(ins,
CanardTransferKindRequest,
uavcan_register_List_1_0_FIXED_PORT_ID_,
uavcan_register_List_Request_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_,
CANARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC,
&register_list_subscription);
return 0;
}
int32_t uavcan_register_interface_add_entry(const char *name, register_access_set_callback cb_set,
register_access_get_callback cb_get)
{
if (register_list_size < UAVCAN_REGISTER_COUNT) {
register_list[register_list_size].name = name;
register_list[register_list_size].cb_set = cb_set;
register_list[register_list_size].cb_get = cb_get;
register_list_size++;
return 0;
} else {
return -UAVCAN_REGISTER_ERROR_OUT_OF_MEMORY; // register list full
}
}
// Handler for all PortID registration related messages
int32_t uavcan_register_interface_process(CanardInstance *ins, CanardTransfer *transfer)
{
if (transfer->port_id == uavcan_node_GetInfo_1_0_FIXED_PORT_ID_) {
return uavcan_register_interface_get_info_response(ins, transfer);
} else if (transfer->port_id == uavcan_register_Access_1_0_FIXED_PORT_ID_) {
return uavcan_register_interface_access_response(ins, transfer);
} else if (transfer->port_id == uavcan_register_List_1_0_FIXED_PORT_ID_) {
return uavcan_register_interface_list_response(ins, transfer);
}
return 0; // Nothing to do
}
// Handler for node.GetInfo which yields a response
int32_t uavcan_register_interface_get_info_response(CanardInstance *ins, CanardTransfer *request)
{
uavcan_node_GetInfo_Request_1_0 msg;
size_t in_size_bits = request->payload_size;
if (uavcan_node_GetInfo_Request_1_0_deserialize_(&msg, request->payload, &in_size_bits) < 0) {
//Error deserialize failed
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
//Note: uavcan_node_GetInfo_Request_1_0 is technically an empty message, so the deserialization above may not be strictly required.
// Setup node.GetInfo response
uint8_t response_payload_buffer[uavcan_node_GetInfo_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_];
CanardMicrosecond transmission_deadline = getMonotonicTimestampUSec() + 1000 * 100;
CanardTransfer response = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindResponse,
.port_id = uavcan_node_GetInfo_1_0_FIXED_PORT_ID_, // This is the subject-ID.
.remote_node_id = request->remote_node_id, // Send back to request Node
.transfer_id = getinfo_response_transfer_id,
.payload_size = uavcan_node_GetInfo_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_,
.payload = &response_payload_buffer,
};
int32_t result = uavcan_node_GetInfo_Response_1_0_serialize_(node_info, &response_payload_buffer,
&response.payload_size);
if (result == 0) {
// set the data ready in the buffer and chop if needed
++getinfo_response_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
result = canardTxPush(ins, &response);
}
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory will never occur for a given application if the
// heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
return 1;
}
// Handler for register access interface
int32_t uavcan_register_interface_access_response(CanardInstance *ins, CanardTransfer *request)
{
int index;
{
uavcan_register_Access_Request_1_0 msg;
if (uavcan_register_Access_Request_1_0_deserialize_(&msg, request->payload, &request->payload_size) < 0) {
//Error deserialize failed
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
{
char register_string[255];
int reg_str_len;
for (index = 0; index < register_list_size; index++) {
reg_str_len = sprintf(register_string, "uavcan.pub.%s.id",
register_list[index].name); //TODO more options than just pub (sub, rate, etc.)
if (strncmp(msg.name.name.elements, register_string, reg_str_len) == 0) {
if (msg.value._tag_ != 0) { // Value has been set thus we call set handler
if (register_list[index].cb_set(&msg.value) != 0) {
// TODO error occurred, check doc for correct response
}
}
break; // We're done exit loop
}
}
}
}
uavcan_register_Access_Response_1_0 response_msg;
uavcan_register_Access_Response_1_0_initialize_(&response_msg);
if (index < register_list_size) { // Index is available
response_msg.value = register_list[index].cb_get();
} else { // Index not found return empty response
uavcan_register_Value_1_0_initialize_(&response_msg.value);
uavcan_register_Value_1_0_select_empty_(&response_msg.value);
}
uint8_t response_payload_buffer[uavcan_register_Access_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_];
CanardMicrosecond transmission_deadline = getMonotonicTimestampUSec() + 1000 * 100;
CanardTransfer response = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindResponse,
.port_id = uavcan_register_Access_1_0_FIXED_PORT_ID_, // This is the subject-ID.
.remote_node_id = request->remote_node_id, // Send back to request Node
.transfer_id = register_access_response_transfer_id,
.payload_size = uavcan_register_Access_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_,
.payload = &response_payload_buffer,
};
int32_t result = uavcan_register_Access_Response_1_0_serialize_(&response_msg, &response_payload_buffer,
&response.payload_size);
if (result == 0) {
// set the data ready in the buffer and chop if needed
++register_access_response_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
result = canardTxPush(ins, &response);
}
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory will never occur for a given application if the
// heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
return 1;
}
// Handler for register list interface
int32_t uavcan_register_interface_list_response(CanardInstance *ins, CanardTransfer *request)
{
uavcan_register_List_Request_1_0 msg;
if (uavcan_register_List_Request_1_0_deserialize_(&msg, request->payload, &request->payload_size) < 0) {
//Error deserialize failed
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
//Setup register response
uint8_t response_payload_buffer[uavcan_register_List_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_]; //TODO we know already how big our response is, don't overallocate.
CanardMicrosecond transmission_deadline = getMonotonicTimestampUSec() + 1000 * 100;
uavcan_register_List_Response_1_0 response_msg;
// Response magic start
if (msg.index < register_list_size) {
response_msg.name.name.count = sprintf(response_msg.name.name.elements,
"uavcan.pub.%s.id",
register_list[msg.index].name);
} else {
response_msg.name.name.count = 0;
}
//TODO more options than just pub (sub, rate, ...)
// Response magic end
CanardTransfer response = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindResponse,
.port_id = uavcan_register_List_1_0_FIXED_PORT_ID_, // This is the subject-ID.
.remote_node_id = request->remote_node_id, // Send back to request Node
.transfer_id = register_list_response_transfer_id,
.payload_size = uavcan_register_List_Response_1_0_SERIALIZATION_BUFFER_SIZE_BYTES_, //See prev TODO
.payload = &response_payload_buffer,
};
int32_t result = uavcan_register_List_Response_1_0_serialize_(&response_msg, &response_payload_buffer,
&response.payload_size);
if (result == 0) {
// set the data ready in the buffer and chop if needed
++register_list_response_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
result = canardTxPush(ins, &response);
}
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory will never occur for a given application if the
// heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
return -UAVCAN_REGISTER_ERROR_SERIALIZATION;
}
return 1;
}


@@ -1,49 +0,0 @@
#ifndef UAVCAN_REGISTER_INTERFACE_H_
#define UAVCAN_REGISTER_INTERFACE_H_
#include <canard.h>
#define NUNAVUT_ASSERT
#include "uavcan/node/GetInfo_1_0.h"
#include "uavcan/_register/Value_1_0.h"
#include "time.h"
//No of pre allocated register entries
#ifndef UAVCAN_REGISTER_COUNT
# define UAVCAN_REGISTER_COUNT 5
#endif
#define UAVCAN_REGISTER_ERROR_SERIALIZATION 1
#define UAVCAN_REGISTER_ERROR_OUT_OF_MEMORY 2
typedef int32_t (*register_access_set_callback)(uavcan_register_Value_1_0 *value);
typedef uavcan_register_Value_1_0(*register_access_get_callback)(void);
typedef struct {
/// uavcan.register.Name.1.0 name
const char *name;
register_access_set_callback cb_set;
register_access_get_callback cb_get;
} uavcan_register_interface_entry;
int32_t uavcan_register_interface_init(CanardInstance *ins, uavcan_node_GetInfo_Response_1_0 *info);
int32_t uavcan_register_interface_add_entry(const char *name, register_access_set_callback cb_set,
register_access_get_callback cb_get);
// Handler for all PortID registration related messages
int32_t uavcan_register_interface_process(CanardInstance *ins, CanardTransfer *transfer);
// Handler for node.GetInfo which yields a response
int32_t uavcan_register_interface_get_info_response(CanardInstance *ins, CanardTransfer *request);
// Handler for register access interface
int32_t uavcan_register_interface_access_response(CanardInstance *ins, CanardTransfer *request);
// Handler for register list interface
int32_t uavcan_register_interface_list_response(CanardInstance *ins, CanardTransfer *request);
#endif //UAVCAN_REGISTER_INTERFACE_H_


@@ -1,6 +0,0 @@
#ifndef MAVCAN_TIME_H_
#define MAVCAN_TIME_H_
#include <stdint.h>
uint64_t getMonotonicTimestampUSec(void);
#endif //MAVCAN_TIME_H_


@@ -1,498 +0,0 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
#include "o1heap.h"
#include <assert.h>
// ---------------------------------------- BUILD CONFIGURATION OPTIONS ----------------------------------------
/// The assertion macro defaults to the standard assert().
/// It can be overridden to manually suppress assertion checks or use a different error handling policy.
#ifndef O1HEAP_ASSERT
// Intentional violation of MISRA: the assertion check macro cannot be replaced with a function definition.
# define O1HEAP_ASSERT(x) assert(x) // NOSONAR
#endif
/// Branch probability annotations are used to improve the worst case execution time (WCET). They are entirely optional.
/// A stock implementation is provided for some well-known compilers; for other compilers it defaults to nothing.
/// If you are using a different compiler, consider overriding this value.
#ifndef O1HEAP_LIKELY
# if defined(__GNUC__) || defined(__clang__) || defined(__CC_ARM)
// Intentional violation of MISRA: branch hinting macro cannot be replaced with a function definition.
# define O1HEAP_LIKELY(x) __builtin_expect((x), 1) // NOSONAR
# else
# define O1HEAP_LIKELY(x) x
# endif
#endif
/// This option is used for testing only. Do not use in production.
#if defined(O1HEAP_EXPOSE_INTERNALS) && O1HEAP_EXPOSE_INTERNALS
# define O1HEAP_PRIVATE
#else
# define O1HEAP_PRIVATE static inline
#endif
// ---------------------------------------- INTERNAL DEFINITIONS ----------------------------------------
#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
# error "Unsupported language: ISO C99 or a newer version is required."
#endif
#if __STDC_VERSION__ < 201112L
// Intentional violation of MISRA: static assertion macro cannot be replaced with a function definition.
# define static_assert(x, ...) typedef char _static_assert_gl(_static_assertion_, __LINE__)[(x) ? 1 : -1] // NOSONAR
# define _static_assert_gl(a, b) _static_assert_gl_impl(a, b) // NOSONAR
// Intentional violation of MISRA: the paste operator ## cannot be avoided in this context.
# define _static_assert_gl_impl(a, b) a##b // NOSONAR
#endif
/// The overhead is at most O1HEAP_ALIGNMENT bytes large,
/// then follows the user data which shall keep the next fragment aligned.
#define FRAGMENT_SIZE_MIN (O1HEAP_ALIGNMENT * 2U)
/// This is risky, handle with care: if the allocation amount plus per-fragment overhead exceeds 2**(b-1),
/// where b is the pointer bit width, then ceil(log2(amount)) yields b; then 2**b causes an integer overflow.
/// To avoid this, we put a hard limit on fragment size (which is amount + per-fragment overhead): 2**(b-1)
#define FRAGMENT_SIZE_MAX ((SIZE_MAX >> 1U) + 1U)
/// Normally we should subtract log2(FRAGMENT_SIZE_MIN) but log2 is bulky to compute using the preprocessor only.
/// We will certainly end up with unused bins this way, but it is cheap to ignore.
#define NUM_BINS_MAX (sizeof(size_t) * 8U)
static_assert((O1HEAP_ALIGNMENT & (O1HEAP_ALIGNMENT - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MIN & (FRAGMENT_SIZE_MIN - 1U)) == 0U, "Not a power of 2");
static_assert((FRAGMENT_SIZE_MAX & (FRAGMENT_SIZE_MAX - 1U)) == 0U, "Not a power of 2");
typedef struct Fragment Fragment;
typedef struct FragmentHeader {
Fragment *next;
Fragment *prev;
size_t size;
bool used;
} FragmentHeader;
static_assert(sizeof(FragmentHeader) <= O1HEAP_ALIGNMENT, "Memory layout error");
struct Fragment {
FragmentHeader header;
// Everything past the header may spill over into the allocatable space. The header survives across alloc/free.
Fragment *next_free; // Next free fragment in the bin; NULL in the last one.
Fragment *prev_free; // Same but points back; NULL in the first one.
};
static_assert(sizeof(Fragment) <= FRAGMENT_SIZE_MIN, "Memory layout error");
struct O1HeapInstance {
Fragment *bins[NUM_BINS_MAX]; ///< Smallest fragments are in the bin at index 0.
size_t nonempty_bin_mask; ///< Bit 1 represents a non-empty bin; bin at index 0 is for the smallest fragments.
O1HeapHook critical_section_enter;
O1HeapHook critical_section_leave;
O1HeapDiagnostics diagnostics;
};
/// The amount of space allocated for the heap instance.
/// Its size is padded up to O1HEAP_ALIGNMENT to ensure correct alignment of the allocation arena that follows.
#define INSTANCE_SIZE_PADDED ((sizeof(O1HeapInstance) + O1HEAP_ALIGNMENT - 1U) & ~(O1HEAP_ALIGNMENT - 1U))
static_assert(INSTANCE_SIZE_PADDED >= sizeof(O1HeapInstance), "Invalid instance footprint computation");
static_assert((INSTANCE_SIZE_PADDED % O1HEAP_ALIGNMENT) == 0U, "Invalid instance footprint computation");
/// True if the argument is an integer power of two or zero.
O1HEAP_PRIVATE bool isPowerOf2(const size_t x);
O1HEAP_PRIVATE bool isPowerOf2(const size_t x)
{
return (x & (x - 1U)) == 0U;
}
/// Special case: if the argument is zero, returns zero.
O1HEAP_PRIVATE uint8_t log2Floor(const size_t x);
O1HEAP_PRIVATE uint8_t log2Floor(const size_t x)
{
size_t tmp = x;
uint8_t y = 0;
// This is currently the only exception to the statement "routines contain neither loops nor recursion".
// It is unclear if there is a better way to compute the binary logarithm than this.
while (tmp > 1U) {
tmp >>= 1U;
y++;
}
return y;
}
/// Special case: if the argument is zero, returns zero.
O1HEAP_PRIVATE uint8_t log2Ceil(const size_t x);
O1HEAP_PRIVATE uint8_t log2Ceil(const size_t x)
{
return (uint8_t)(log2Floor(x) + (isPowerOf2(x) ? 0U : 1U));
}
/// Raise 2 into the specified power.
/// You might be tempted to do something like (1U << power). WRONG! We humans are prone to forgetting things.
/// If you forget to cast your 1U to size_t or ULL, you may end up with undefined behavior.
O1HEAP_PRIVATE size_t pow2(const uint8_t power);
O1HEAP_PRIVATE size_t pow2(const uint8_t power)
{
return ((size_t) 1U) << power;
}
O1HEAP_PRIVATE void invoke(const O1HeapHook hook);
O1HEAP_PRIVATE void invoke(const O1HeapHook hook)
{
if (hook != NULL) {
hook();
}
}
/// Links two fragments so that their next/prev pointers point to each other; left goes before right.
O1HEAP_PRIVATE void interlink(Fragment *const left, Fragment *const right);
O1HEAP_PRIVATE void interlink(Fragment *const left, Fragment *const right)
{
if (O1HEAP_LIKELY(left != NULL)) {
left->header.next = right;
}
if (O1HEAP_LIKELY(right != NULL)) {
right->header.prev = left;
}
}
/// Adds a new block into the appropriate bin and updates the lookup mask.
O1HEAP_PRIVATE void rebin(O1HeapInstance *const handle, Fragment *const fragment);
O1HEAP_PRIVATE void rebin(O1HeapInstance *const handle, Fragment *const fragment)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(fragment != NULL);
O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
const uint8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when inserting.
O1HEAP_ASSERT(idx < NUM_BINS_MAX);
// Add the new fragment to the beginning of the bin list.
// I.e., each allocation will be returning the least-recently-used fragment -- good for caching.
fragment->next_free = handle->bins[idx];
fragment->prev_free = NULL;
if (O1HEAP_LIKELY(handle->bins[idx] != NULL)) {
handle->bins[idx]->prev_free = fragment;
}
handle->bins[idx] = fragment;
handle->nonempty_bin_mask |= pow2(idx);
}
/// Removes the specified block from its bin.
O1HEAP_PRIVATE void unbin(O1HeapInstance *const handle, const Fragment *const fragment);
O1HEAP_PRIVATE void unbin(O1HeapInstance *const handle, const Fragment *const fragment)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(fragment != NULL);
O1HEAP_ASSERT(fragment->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT((fragment->header.size % FRAGMENT_SIZE_MIN) == 0U);
const uint8_t idx = log2Floor(fragment->header.size / FRAGMENT_SIZE_MIN); // Round DOWN when removing.
O1HEAP_ASSERT(idx < NUM_BINS_MAX);
// Remove the bin from the free fragment list.
if (O1HEAP_LIKELY(fragment->next_free != NULL)) {
fragment->next_free->prev_free = fragment->prev_free;
}
if (O1HEAP_LIKELY(fragment->prev_free != NULL)) {
fragment->prev_free->next_free = fragment->next_free;
}
// Update the bin header.
if (O1HEAP_LIKELY(handle->bins[idx] == fragment)) {
O1HEAP_ASSERT(fragment->prev_free == NULL);
handle->bins[idx] = fragment->next_free;
if (O1HEAP_LIKELY(handle->bins[idx] == NULL)) {
handle->nonempty_bin_mask &= ~pow2(idx);
}
}
}
// ---------------------------------------- PUBLIC API IMPLEMENTATION ----------------------------------------
O1HeapInstance *o1heapInit(void *const base,
const size_t size,
const O1HeapHook critical_section_enter,
const O1HeapHook critical_section_leave)
{
O1HeapInstance *out = NULL;
if ((base != NULL) && ((((size_t) base) % O1HEAP_ALIGNMENT) == 0U) &&
(size >= (INSTANCE_SIZE_PADDED + FRAGMENT_SIZE_MIN))) {
// Allocate the core heap metadata structure in the beginning of the arena.
O1HEAP_ASSERT(((size_t) base) % sizeof(O1HeapInstance *) == 0U);
out = (O1HeapInstance *) base;
out->nonempty_bin_mask = 0U;
out->critical_section_enter = critical_section_enter;
out->critical_section_leave = critical_section_leave;
for (size_t i = 0; i < NUM_BINS_MAX; i++) {
out->bins[i] = NULL;
}
// Limit and align the capacity.
size_t capacity = size - INSTANCE_SIZE_PADDED;
if (capacity > FRAGMENT_SIZE_MAX) {
capacity = FRAGMENT_SIZE_MAX;
}
while ((capacity % FRAGMENT_SIZE_MIN) != 0) {
O1HEAP_ASSERT(capacity > 0U);
capacity--;
}
O1HEAP_ASSERT((capacity % FRAGMENT_SIZE_MIN) == 0);
O1HEAP_ASSERT((capacity >= FRAGMENT_SIZE_MIN) && (capacity <= FRAGMENT_SIZE_MAX));
// Initialize the root fragment.
Fragment *const frag = (Fragment *)(void *)(((uint8_t *) base) + INSTANCE_SIZE_PADDED);
O1HEAP_ASSERT((((size_t) frag) % O1HEAP_ALIGNMENT) == 0U);
frag->header.next = NULL;
frag->header.prev = NULL;
frag->header.size = capacity;
frag->header.used = false;
frag->next_free = NULL;
frag->prev_free = NULL;
rebin(out, frag);
O1HEAP_ASSERT(out->nonempty_bin_mask != 0U);
// Initialize the diagnostics.
out->diagnostics.capacity = capacity;
out->diagnostics.allocated = 0U;
out->diagnostics.peak_allocated = 0U;
out->diagnostics.peak_request_size = 0U;
out->diagnostics.oom_count = 0U;
}
return out;
}
void *o1heapAllocate(O1HeapInstance *const handle, const size_t amount)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
void *out = NULL;
// If the amount approaches approx. SIZE_MAX/2, an undetected integer overflow may occur.
// To avoid that, we do not attempt allocation if the amount exceeds the hard limit.
// We perform multiple redundant checks to account for a possible unaccounted overflow.
if (O1HEAP_LIKELY((amount > 0U) && (amount <= (handle->diagnostics.capacity - O1HEAP_ALIGNMENT)))) {
// Add the header size and align the allocation size to the power of 2.
// See "Timing-Predictable Memory Allocation In Hard Real-Time Systems", Herter, page 27.
const size_t fragment_size = pow2(log2Ceil(amount + O1HEAP_ALIGNMENT));
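// Worked example (a sketch; assumes a 32-bit target where O1HEAP_ALIGNMENT is 16, so FRAGMENT_SIZE_MIN is 32):
// a 40-byte request becomes pow2(log2Ceil(40 + 16)) = 64 bytes, which later maps to bin index log2Ceil(64 / 32) = 1.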
O1HEAP_ASSERT(fragment_size <= FRAGMENT_SIZE_MAX);
O1HEAP_ASSERT(fragment_size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT(fragment_size >= amount + O1HEAP_ALIGNMENT);
O1HEAP_ASSERT(isPowerOf2(fragment_size));
const uint8_t optimal_bin_index = log2Ceil(fragment_size / FRAGMENT_SIZE_MIN); // Use CEIL when fetching.
O1HEAP_ASSERT(optimal_bin_index < NUM_BINS_MAX);
const size_t candidate_bin_mask = ~(pow2(optimal_bin_index) - 1U);
invoke(handle->critical_section_enter);
// Find the smallest non-empty bin we can use.
const size_t suitable_bins = handle->nonempty_bin_mask & candidate_bin_mask;
const size_t smallest_bin_mask = suitable_bins & ~(suitable_bins - 1U); // Clear all bits but the lowest.
if (O1HEAP_LIKELY(smallest_bin_mask != 0)) {
O1HEAP_ASSERT(isPowerOf2(smallest_bin_mask));
const uint8_t bin_index = log2Floor(smallest_bin_mask);
O1HEAP_ASSERT(bin_index >= optimal_bin_index);
O1HEAP_ASSERT(bin_index < NUM_BINS_MAX);
// The bin we found shall not be empty, otherwise it's a state divergence (memory corruption?).
Fragment *const frag = handle->bins[bin_index];
O1HEAP_ASSERT(frag != NULL);
O1HEAP_ASSERT(frag->header.size >= fragment_size);
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
O1HEAP_ASSERT(!frag->header.used);
unbin(handle, frag);
// Split the fragment if it is too large.
const size_t leftover = frag->header.size - fragment_size;
frag->header.size = fragment_size;
O1HEAP_ASSERT(leftover < handle->diagnostics.capacity); // Overflow check.
O1HEAP_ASSERT(leftover % FRAGMENT_SIZE_MIN == 0U); // Alignment check.
if (O1HEAP_LIKELY(leftover >= FRAGMENT_SIZE_MIN)) {
Fragment *const new_frag = (Fragment *)(void *)(((uint8_t *) frag) + fragment_size);
O1HEAP_ASSERT(((size_t) new_frag) % O1HEAP_ALIGNMENT == 0U);
new_frag->header.size = leftover;
new_frag->header.used = false;
interlink(new_frag, frag->header.next);
interlink(frag, new_frag);
rebin(handle, new_frag);
}
// Update the diagnostics.
O1HEAP_ASSERT((handle->diagnostics.allocated % FRAGMENT_SIZE_MIN) == 0U);
handle->diagnostics.allocated += fragment_size;
O1HEAP_ASSERT(handle->diagnostics.allocated <= handle->diagnostics.capacity);
if (O1HEAP_LIKELY(handle->diagnostics.peak_allocated < handle->diagnostics.allocated)) {
handle->diagnostics.peak_allocated = handle->diagnostics.allocated;
}
// Finalize the fragment we just allocated.
O1HEAP_ASSERT(frag->header.size >= amount + O1HEAP_ALIGNMENT);
frag->header.used = true;
out = ((uint8_t *) frag) + O1HEAP_ALIGNMENT;
}
} else {
invoke(handle->critical_section_enter);
}
// Update the diagnostics.
if (O1HEAP_LIKELY(handle->diagnostics.peak_request_size < amount)) {
handle->diagnostics.peak_request_size = amount;
}
if (O1HEAP_LIKELY((out == NULL) && (amount > 0U))) {
handle->diagnostics.oom_count++;
}
invoke(handle->critical_section_leave);
return out;
}
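// ---------------------------------------------------------------------------
// Worked example (illustration only, not part of the library): on a 32-bit
// target, O1HEAP_ALIGNMENT is 16 and FRAGMENT_SIZE_MIN is twice that, i.e. 32.
// A request of amount = 1 byte is then handled by o1heapAllocate() as follows:
//   fragment_size     = pow2(log2Ceil(1 + 16)) = 32   (header added, rounded up)
//   optimal_bin_index = log2Ceil(32 / 32)      = 0    (smallest bin searched first)
//   returned pointer  = ((uint8_t *) frag) + 16       (payload aligned at 16)
// and diagnostics.allocated grows by the full 32 bytes, which is why a 1-byte
// request shows up as a 32-byte allocation in the diagnostics.
// ---------------------------------------------------------------------------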
void o1heapFree(O1HeapInstance *const handle, void *const pointer)
{
O1HEAP_ASSERT(handle != NULL);
O1HEAP_ASSERT(handle->diagnostics.capacity <= FRAGMENT_SIZE_MAX);
if (O1HEAP_LIKELY(pointer != NULL)) { // NULL pointer is a no-op.
Fragment *const frag = (Fragment *)(void *)(((uint8_t *) pointer) - O1HEAP_ALIGNMENT);
// Check for heap corruption in debug builds.
O1HEAP_ASSERT(((size_t) frag) % sizeof(Fragment *) == 0U);
O1HEAP_ASSERT(((size_t) frag) >= (((size_t) handle) + INSTANCE_SIZE_PADDED));
O1HEAP_ASSERT(((size_t) frag) <=
(((size_t) handle) + INSTANCE_SIZE_PADDED + handle->diagnostics.capacity - FRAGMENT_SIZE_MIN));
O1HEAP_ASSERT(frag->header.used); // Catch double-free
O1HEAP_ASSERT(((size_t) frag->header.next) % sizeof(Fragment *) == 0U);
O1HEAP_ASSERT(((size_t) frag->header.prev) % sizeof(Fragment *) == 0U);
O1HEAP_ASSERT(frag->header.size >= FRAGMENT_SIZE_MIN);
O1HEAP_ASSERT(frag->header.size <= handle->diagnostics.capacity);
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
invoke(handle->critical_section_enter);
// Even if we're going to drop the fragment later, mark it free anyway to prevent double-free.
frag->header.used = false;
// Update the diagnostics. It must be done before merging because it invalidates the fragment size information.
O1HEAP_ASSERT(handle->diagnostics.allocated >= frag->header.size); // Heap corruption check.
handle->diagnostics.allocated -= frag->header.size;
// Merge with siblings and insert the returned fragment into the appropriate bin and update metadata.
Fragment *const prev = frag->header.prev;
Fragment *const next = frag->header.next;
const bool join_left = (prev != NULL) && (!prev->header.used);
const bool join_right = (next != NULL) && (!next->header.used);
if (join_left && join_right) { // [ prev ][ this ][ next ] => [ ------- prev ------- ]
unbin(handle, prev);
unbin(handle, next);
prev->header.size += frag->header.size + next->header.size;
frag->header.size = 0; // Invalidate the dropped fragment headers to prevent double-free.
next->header.size = 0;
O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(prev, next->header.next);
rebin(handle, prev);
} else if (join_left) { // [ prev ][ this ][ next ] => [ --- prev --- ][ next ]
unbin(handle, prev);
prev->header.size += frag->header.size;
frag->header.size = 0;
O1HEAP_ASSERT((prev->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(prev, next);
rebin(handle, prev);
} else if (join_right) { // [ prev ][ this ][ next ] => [ prev ][ --- this --- ]
unbin(handle, next);
frag->header.size += next->header.size;
next->header.size = 0;
O1HEAP_ASSERT((frag->header.size % FRAGMENT_SIZE_MIN) == 0U);
interlink(frag, next->header.next);
rebin(handle, frag);
} else {
rebin(handle, frag);
}
invoke(handle->critical_section_leave);
}
}
bool o1heapDoInvariantsHold(const O1HeapInstance *const handle)
{
O1HEAP_ASSERT(handle != NULL);
bool valid = true;
invoke(handle->critical_section_enter);
// Check the bin mask consistency.
for (size_t i = 0; i < NUM_BINS_MAX; i++) { // Dear compiler, feel free to unroll this loop.
const bool mask_bit_set = (handle->nonempty_bin_mask & pow2((uint8_t) i)) != 0U;
const bool bin_nonempty = handle->bins[i] != NULL;
valid = valid && (mask_bit_set == bin_nonempty);
}
// Create a local copy of the diagnostics struct to check later and release the critical section early.
const O1HeapDiagnostics diag = handle->diagnostics;
invoke(handle->critical_section_leave);
// Capacity check.
valid = valid && (diag.capacity <= FRAGMENT_SIZE_MAX) && (diag.capacity >= FRAGMENT_SIZE_MIN) &&
((diag.capacity % FRAGMENT_SIZE_MIN) == 0U);
// Allocation info check.
valid = valid && (diag.allocated <= diag.capacity) && ((diag.allocated % FRAGMENT_SIZE_MIN) == 0U) &&
(diag.peak_allocated <= diag.capacity) && (diag.peak_allocated >= diag.allocated) &&
((diag.peak_allocated % FRAGMENT_SIZE_MIN) == 0U);
// Peak request check
valid = valid && ((diag.peak_request_size < diag.capacity) || (diag.oom_count > 0U));
if (diag.peak_request_size == 0U) {
valid = valid && (diag.peak_allocated == 0U) && (diag.allocated == 0U) && (diag.oom_count == 0U);
} else {
valid = valid && // Overflow on summation is possible but safe to ignore.
(((diag.peak_request_size + O1HEAP_ALIGNMENT) <= diag.peak_allocated) || (diag.oom_count > 0U));
}
return valid;
}
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance *const handle)
{
O1HEAP_ASSERT(handle != NULL);
invoke(handle->critical_section_enter);
const O1HeapDiagnostics out = handle->diagnostics;
invoke(handle->critical_section_leave);
return out;
}
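// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the library): a periodic health check that
// combines the invariant test with a diagnostics snapshot, along the lines
// suggested by the API above. The thresholds are assumptions for illustration.
// ---------------------------------------------------------------------------
static bool exampleHeapHealthCheck(const O1HeapInstance *const heap)
{
    if (!o1heapDoInvariantsHold(heap)) {
        return false;  // Bin mask or diagnostics inconsistent: likely heap corruption.
    }
    const O1HeapDiagnostics diag = o1heapGetDiagnostics(heap);
    // A non-zero OOM count (or a peak close to capacity) suggests the arena is undersized.
    return (diag.oom_count == 0U) && (diag.peak_allocated <= (diag.capacity / 2U));
}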


@ -1,142 +0,0 @@
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
// and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// Copyright (c) 2020 Pavel Kirienko
// Authors: Pavel Kirienko <pavel.kirienko@zubax.com>
//
// READ THE DOCUMENTATION IN README.md.
#ifndef O1HEAP_H_INCLUDED
#define O1HEAP_H_INCLUDED
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/// The semantic version number of this distribution.
#define O1HEAP_VERSION_MAJOR 1
/// The guaranteed alignment depends on the platform pointer width.
#define O1HEAP_ALIGNMENT (sizeof(void*) * 4U)
/// The definition is private, so the user code can only operate on pointers. This is done to enforce encapsulation.
typedef struct O1HeapInstance O1HeapInstance;
/// A hook function invoked by the allocator. NULL hooks are silently not invoked (not an error).
typedef void (*O1HeapHook)(void);
/// Runtime diagnostic information. This information can be used to facilitate runtime self-testing,
/// as required by certain safety-critical development guidelines.
/// If assertion checks are not disabled, the library will perform automatic runtime self-diagnostics that trigger
/// an assertion failure if a heap corruption is detected.
/// Health checks and validation can be done with @ref o1heapDoInvariantsHold().
typedef struct {
/// The total amount of memory available for serving allocation requests (heap size).
/// The maximum allocation size is (capacity - O1HEAP_ALIGNMENT).
/// This parameter does not include the overhead used up by @ref O1HeapInstance and arena alignment.
/// This parameter is constant.
size_t capacity;
/// The amount of memory that is currently allocated, including the per-fragment overhead and size alignment.
/// For example, if the application requested a fragment of size 1 byte, the value reported here may be 32 bytes.
size_t allocated;
/// The maximum value of 'allocated' seen since initialization. This parameter is never decreased.
size_t peak_allocated;
/// The largest amount of memory that the allocator has attempted to allocate (perhaps unsuccessfully)
/// since initialization (not including the rounding and the allocator's own per-fragment overhead,
/// so the total is larger). This parameter is never decreased. The initial value is zero.
size_t peak_request_size;
/// The number of times an allocation request could not be completed due to the lack of memory or
/// excessive fragmentation. OOM stands for "out of memory". This parameter is never decreased.
uint64_t oom_count;
} O1HeapDiagnostics;
/// The arena base pointer shall be aligned at @ref O1HEAP_ALIGNMENT, otherwise NULL is returned.
///
/// The total heap capacity cannot exceed approx. (SIZE_MAX/2). If the arena size allows for a larger heap,
/// the excess will be silently truncated away (no error). This is not a realistic use case because a typical
/// application is unlikely to be able to dedicate that much of the address space for the heap.
///
/// The critical section enter/leave callbacks will be invoked when the allocator performs an atomic transaction.
/// There is at most one atomic transaction per allocation/deallocation.
/// Either or both of the callbacks may be NULL if locking is not needed (i.e., the heap is not shared).
/// It is guaranteed that a critical section will never be entered recursively.
/// It is guaranteed that 'enter' is invoked the same number of times as 'leave', unless either of them are NULL.
/// It is guaranteed that 'enter' is invoked before 'leave', unless either of them are NULL.
/// The callbacks are never invoked from the initialization function itself.
///
/// The function initializes a new heap instance allocated in the provided arena, taking some of its space for its
/// own needs (normally about 40..600 bytes depending on the architecture, but this parameter is not characterized).
/// A pointer to the newly initialized instance is returned.
///
/// If the provided space is insufficient, NULL is returned.
///
/// An initialized instance does not hold any resources. Therefore, if the instance is no longer needed,
/// it can be discarded without any de-initialization procedures.
///
/// The time complexity is unspecified.
O1HeapInstance *o1heapInit(void *const base,
const size_t size,
const O1HeapHook critical_section_enter,
const O1HeapHook critical_section_leave);
/// The semantics follows malloc() with additional guarantees the full list of which is provided below.
///
/// If the allocation request is served successfully, a pointer to the newly allocated memory fragment is returned.
/// The returned pointer is guaranteed to be aligned at @ref O1HEAP_ALIGNMENT.
///
/// If the allocation request cannot be served due to the lack of memory or its excessive fragmentation,
/// a NULL pointer is returned.
///
/// The function is executed in constant time (unless the critical section management hooks are used and are not
/// constant-time). The allocated memory is NOT zero-filled (because zero-filling is a variable-complexity operation).
///
/// The function may invoke critical_section_enter and critical_section_leave at most once each (NULL hooks ignored).
void *o1heapAllocate(O1HeapInstance *const handle, const size_t amount);
/// The semantics follows free() with additional guarantees the full list of which is provided below.
///
/// If the pointer does not point to a previously allocated block and is not NULL, the behavior is undefined.
/// Builds where assertion checks are enabled may trigger an assertion failure for some invalid inputs.
///
/// The function is executed in constant time (unless the critical section management hooks are used and are not
/// constant-time).
///
/// The function may invoke critical_section_enter and critical_section_leave at most once each (NULL hooks ignored).
void o1heapFree(O1HeapInstance *const handle, void *const pointer);
/// Performs a basic sanity check on the heap.
/// This function can be used as a weak but fast method of heap corruption detection.
/// It invokes critical_section_enter once (unless NULL) and then critical_section_leave once (unless NULL).
/// If the handle pointer is NULL, the behavior is undefined.
/// The time complexity is constant.
/// The return value is truth if the heap looks valid, falsity otherwise.
bool o1heapDoInvariantsHold(const O1HeapInstance *const handle);
/// Samples and returns a copy of the diagnostic information, see @ref O1HeapDiagnostics.
/// This function merely copies the structure from an internal storage, so it is fast to return.
/// It invokes critical_section_enter once (unless NULL) and then critical_section_leave once (unless NULL).
/// If the handle pointer is NULL, the behavior is undefined.
O1HeapDiagnostics o1heapGetDiagnostics(const O1HeapInstance *const handle);
#ifdef __cplusplus
}
#endif
#endif // O1HEAP_H_INCLUDED
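// ---------------------------------------------------------------------------
// Minimal usage sketch (illustration only, not part of the header): a heap
// placed in a static arena with no locking hooks. The 4096-byte arena size,
// the request size and the GCC alignment attribute are assumptions chosen for
// the example; the o1heap.h declarations above are assumed to be included.
// ---------------------------------------------------------------------------
static uint8_t arena[4096] __attribute__((aligned(O1HEAP_ALIGNMENT)));

static void exampleHeapUsage(void)
{
    O1HeapInstance *heap = o1heapInit(arena, sizeof(arena), NULL, NULL);  // NULL hooks: no locking.
    if (heap == NULL) {
        return;  // Arena too small or misaligned.
    }
    void *p = o1heapAllocate(heap, 100U);  // Rounded up internally; aligned at O1HEAP_ALIGNMENT.
    if (p != NULL) {
        o1heapFree(heap, p);
    }
    const O1HeapDiagnostics diag = o1heapGetDiagnostics(heap);
    (void) diag.oom_count;  // Could feed a watchdog or health report.
}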

@ -1 +0,0 @@
Subproject commit 0a773b93ce5c94e1d2791b180058cb9897fab7e1


@ -1,172 +0,0 @@
#include "socketcan.h"
#include <stdio.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <string.h>
int16_t socketcanOpen(CanardSocketInstance *ins, const char *const can_iface_name, const bool can_fd)
{
struct sockaddr_can addr;
struct ifreq ifr;
ins->can_fd = can_fd;
/* open socket */
if ((ins->s = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {
perror("socket");
return -1;
}
strncpy(ifr.ifr_name, can_iface_name, IFNAMSIZ - 1);
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
ifr.ifr_ifindex = if_nametoindex(ifr.ifr_name);
if (!ifr.ifr_ifindex) {
perror("if_nametoindex");
return -1;
}
memset(&addr, 0, sizeof(addr));
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
const int on = 1;
/* RX Timestamping */
if (setsockopt(ins->s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)) < 0) {
perror("SO_TIMESTAMP is disabled");
return -1;
}
/* NuttX Feature: Enable TX deadline when sending CAN frames.
 * When the deadline expires, the driver drops the CAN frame.
 */
if (setsockopt(ins->s, SOL_CAN_RAW, CAN_RAW_TX_DEADLINE, &on, sizeof(on)) < 0) {
perror("CAN_RAW_TX_DEADLINE is disabled");
return -1;
}
if (can_fd) {
if (setsockopt(ins->s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &on, sizeof(on)) < 0) {
perror("no CAN FD support");
return -1;
}
}
if (bind(ins->s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
perror("bind");
return -1;
}
// Setup TX msg
ins->send_iov.iov_base = &ins->send_frame;
if (ins->can_fd) {
ins->send_iov.iov_len = sizeof(struct canfd_frame);
} else {
ins->send_iov.iov_len = sizeof(struct can_frame);
}
memset(&ins->send_control, 0x00, sizeof(ins->send_control));
ins->send_msg.msg_iov = &ins->send_iov;
ins->send_msg.msg_iovlen = 1;
ins->send_msg.msg_control = &ins->send_control;
ins->send_msg.msg_controllen = sizeof(ins->send_control);
ins->send_cmsg = CMSG_FIRSTHDR(&ins->send_msg);
ins->send_cmsg->cmsg_level = SOL_CAN_RAW;
ins->send_cmsg->cmsg_type = CAN_RAW_TX_DEADLINE;
ins->send_cmsg->cmsg_len = sizeof(struct timeval);
ins->send_tv = (struct timeval *)CMSG_DATA(ins->send_cmsg);
// Setup RX msg
ins->recv_iov.iov_base = &ins->recv_frame;
if (can_fd) {
ins->recv_iov.iov_len = sizeof(struct canfd_frame);
} else {
ins->recv_iov.iov_len = sizeof(struct can_frame);
}
memset(&ins->recv_control, 0x00, sizeof(ins->recv_control));
ins->recv_msg.msg_iov = &ins->recv_iov;
ins->recv_msg.msg_iovlen = 1;
ins->recv_msg.msg_control = &ins->recv_control;
ins->recv_msg.msg_controllen = sizeof(ins->recv_control);
ins->recv_cmsg = CMSG_FIRSTHDR(&ins->recv_msg);
return 0;
}
int16_t socketcanTransmit(CanardSocketInstance *ins, const CanardFrame *txf)
{
/* Copy CanardFrame to can_frame/canfd_frame */
if (ins->can_fd) {
ins->send_frame.can_id = txf->extended_can_id;
ins->send_frame.can_id |= CAN_EFF_FLAG;
ins->send_frame.len = txf->payload_size;
memcpy(&ins->send_frame.data, txf->payload, txf->payload_size);
} else {
struct can_frame *frame = (struct can_frame *)&ins->send_frame;
frame->can_id = txf->extended_can_id;
frame->can_id |= CAN_EFF_FLAG;
frame->can_dlc = txf->payload_size;
memcpy(&frame->data, txf->payload, txf->payload_size);
}
/* Set CAN_RAW_TX_DEADLINE timestamp */
ins->send_tv->tv_usec = txf->timestamp_usec % 1000000ULL;
ins->send_tv->tv_sec = (txf->timestamp_usec - ins->send_tv->tv_usec) / 1000000ULL;
return sendmsg(ins->s, &ins->send_msg, 0);
}
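// ---------------------------------------------------------------------------
// Worked example (illustration only): how the TX deadline above is split into
// the timeval fields. For txf->timestamp_usec = 1234567 us:
//   tv_usec = 1234567 % 1000000            = 234567
//   tv_sec  = (1234567 - 234567) / 1000000 = 1
// Per the libcanard convention a timestamp of zero means the deadline is not
// limited; what the NuttX driver does with {0, 0} is up to its implementation.
// ---------------------------------------------------------------------------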
int16_t socketcanReceive(CanardSocketInstance *ins, CanardFrame *rxf)
{
int32_t result = recvmsg(ins->s, &ins->recv_msg, 0);
if (result < 0) {
return result;
}
/* Copy CAN frame to CanardFrame */
if (ins->can_fd) {
struct canfd_frame *recv_frame = (struct canfd_frame *)&ins->recv_frame;
rxf->extended_can_id = recv_frame->can_id & CAN_EFF_MASK;
rxf->payload_size = recv_frame->len;
rxf->payload = &recv_frame->data;
} else {
struct can_frame *recv_frame = (struct can_frame *)&ins->recv_frame;
rxf->extended_can_id = recv_frame->can_id & CAN_EFF_MASK;
rxf->payload_size = recv_frame->can_dlc;
rxf->payload = &recv_frame->data; //FIXME either copy or clearly state the pointer reference
}
/* Read SO_TIMESTAMP value */
if (ins->recv_cmsg->cmsg_level == SOL_SOCKET && ins->recv_cmsg->cmsg_type == SO_TIMESTAMP) {
struct timeval *tv = (struct timeval *)CMSG_DATA(ins->recv_cmsg);
rxf->timestamp_usec = tv->tv_sec * 1000000ULL + tv->tv_usec;
}
return result;
}
/* TODO implement corresponding IOCTL */
int16_t socketcanConfigureFilter(const fd_t fd, const size_t num_filters, const struct can_filter *filters)
{
return -1;
}


@ -1,69 +0,0 @@
#ifndef SOCKETCAN_H_INCLUDED
#define SOCKETCAN_H_INCLUDED
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <nuttx/can.h>
#include <netpacket/can.h>
#include <canard.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct CanardSocketInstance CanardSocketInstance;
typedef int fd_t;
struct CanardSocketInstance {
fd_t s;
bool can_fd;
//// Send msg structure
struct iovec send_iov;
struct canfd_frame send_frame;
struct msghdr send_msg;
struct cmsghdr *send_cmsg;
struct timeval *send_tv; /* TX deadline timestamp */
uint8_t send_control[sizeof(struct cmsghdr) + sizeof(struct timeval)];
//// Receive msg structure
struct iovec recv_iov;
struct canfd_frame recv_frame;
struct msghdr recv_msg;
struct cmsghdr *recv_cmsg;
uint8_t recv_control[sizeof(struct cmsghdr) + sizeof(struct timeval)];
};
/// Creates a SocketCAN socket for the given interface can_iface_name
/// Also sets up the message structures required for socketcanTransmit & socketcanReceive
/// can_fd selects CAN FD frames when true and Classical CAN frames when false
/// The return value is 0 on success and -1 on error
int16_t socketcanOpen(CanardSocketInstance *ins, const char *const can_iface_name, const bool can_fd);
/// Send a CanardFrame to the CanardSocketInstance socket
/// This function is blocking
/// The return value is the number of bytes transferred, or a negative value on error.
int16_t socketcanTransmit(CanardSocketInstance *ins, const CanardFrame *txf);
/// Receive a CanardFrame from the CanardSocketInstance socket
/// This function is blocking
/// The return value is the number of bytes received, or a negative value on error.
int16_t socketcanReceive(CanardSocketInstance *ins, CanardFrame *rxf);
// TODO implement ioctl for CAN filter
int16_t socketcanConfigureFilter(const fd_t fd, const size_t num_filters, const struct can_filter *filters);
#ifdef __cplusplus
}
#endif
#endif //SOCKETCAN_H_INCLUDED
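// ---------------------------------------------------------------------------
// Minimal usage sketch (illustration only, not part of the header): opening
// "can0" in Classical CAN mode and sending one frame. The interface name,
// the 29-bit CAN ID and the payload bytes are assumptions chosen for the
// example; CanardFrame comes from canard.h, which is already included above.
// ---------------------------------------------------------------------------
static int exampleSocketcanUsage(void)
{
    CanardSocketInstance sock;

    if (socketcanOpen(&sock, "can0", false) != 0) {  // false selects Classical CAN.
        return -1;
    }

    const uint8_t payload[4] = {0xDE, 0xAD, 0xBE, 0xEF};
    const CanardFrame txf = {
        .timestamp_usec  = 0,           // Zero: transmission deadline not limited.
        .extended_can_id = 0x1234567U,  // Hypothetical 29-bit extended identifier.
        .payload_size    = sizeof(payload),
        .payload         = payload,
    };

    if (socketcanTransmit(&sock, &txf) < 0) {
        return -1;
    }

    CanardFrame rxf;
    return (socketcanReceive(&sock, &rxf) < 0) ? -1 : 0;  // Blocks until a frame arrives.
}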


@ -1,108 +0,0 @@
/*
* uorb_converter.c
*
* Created on: Apr 10, 2020
* Author: hovergames
*/
#include <inttypes.h>
#include <string.h>
#include "uorb_converter.h"
/****************************************************************************
* Private Data
****************************************************************************/
int uorb_sub_fd;
px4_pollfd_struct_t fds[1];
int error_counter;
CanardInstance *canard_ins;
int raw_uorb_message_transfer_id;
int fix_message_transfer_id;
int aux_message_transfer_id;
int16_t *gps_raw_uorb_port_id;
int16_t *gps_fix_port_id;
int16_t *gps_aux_port_id;
void uorbConverterInit(CanardInstance *ins, int16_t *raw_uorb_port_id, int16_t *fix_port_id, int16_t *aux_port_id)
{
canard_ins = ins;
gps_raw_uorb_port_id = raw_uorb_port_id;
gps_fix_port_id = fix_port_id;
gps_aux_port_id = aux_port_id;
/* subscribe to the uORB topic */
uorb_sub_fd = orb_subscribe(ORB_ID(sensor_gps));
orb_set_interval(uorb_sub_fd, 200);
/* one could wait for multiple topics with this technique, just using one here */
fds[0].fd = uorb_sub_fd;
fds[0].events = POLLIN;
error_counter = 0;
raw_uorb_message_transfer_id = 0;
fix_message_transfer_id = 0;
aux_message_transfer_id = 0;
}
void uorbProcessSub(int timeout_msec)
{
/* wait for subscriber update of 1 file descriptor for timeout_msec ms */
int poll_ret = px4_poll(fds, 1, timeout_msec);
/* handle the poll result */
if (poll_ret == 0) {
/* this means none of our providers is giving us data */
} else if (poll_ret < 0) {
/* this is seriously bad - should be an emergency */
if (error_counter < 10 || error_counter % 50 == 0) {
/* use a counter to prevent flooding (and slowing us down) */
PX4_ERR("ERROR return value from poll(): %d", poll_ret);
}
error_counter++;
} else {
if (fds[0].revents & POLLIN) {
/* obtained data for the first file descriptor */
struct sensor_gps_s raw;
/* copy sensors raw data into local buffer */
orb_copy(ORB_ID(sensor_gps), uorb_sub_fd, &raw);
CanardMicrosecond transmission_deadline = getMonotonicTimestampUSec() + (uint64_t)(1000ULL * 100ULL);
// raw uORB sensor_gps message
if (*gps_raw_uorb_port_id != -1) {
CanardTransfer transfer = {
.timestamp_usec = transmission_deadline, // Zero if transmission deadline is not limited.
.priority = CanardPriorityNominal,
.transfer_kind = CanardTransferKindMessage,
.port_id = *gps_raw_uorb_port_id, // This is the subject-ID.
.remote_node_id = CANARD_NODE_ID_UNSET, // Messages cannot be unicast, so use UNSET.
.transfer_id = raw_uorb_message_transfer_id,
.payload_size = sizeof(struct sensor_gps_s),
.payload = &raw,
};
++raw_uorb_message_transfer_id; // The transfer-ID shall be incremented after every transmission on this subject.
int32_t result = canardTxPush(canard_ins, &transfer);
if (result < 0) {
// An error has occurred: either an argument is invalid or we've run out of memory.
// It is possible to statically prove that an out-of-memory condition will never occur for a given application
// if the heap is sized correctly; for background, refer to Robson's proof and the documentation for O1Heap.
PX4_ERR("canardTxPush error %" PRId32, result);
}
}
PX4_INFO("Recv from uORB");
}
}
}
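// ---------------------------------------------------------------------------
// Companion sketch (illustration only, not part of this file): frames queued
// by canardTxPush() above still have to be drained onto the bus. The loop
// below follows the usual libcanard v1 pattern; it assumes the socketcan.h
// wrapper from this driver is also included, and that canardTxPeek()/
// canardTxPop() match the libcanard revision pinned by the submodule.
// ---------------------------------------------------------------------------
static void exampleProcessTxQueue(CanardInstance *ins, CanardSocketInstance *sock)
{
    for (const CanardFrame *txf = NULL; (txf = canardTxPeek(ins)) != NULL;) {
        if (socketcanTransmit(sock, txf) < 0) {
            break;  // Keep the frame queued and retry on the next pass of the main loop.
        }
        canardTxPop(ins);
        ins->memory_free(ins, (CanardFrame *)txf);  // Return the frame memory to the allocator.
    }
}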


@ -1,34 +0,0 @@
/*
* uorb_converter.h
*
* Created on: Apr 10, 2020
* Author: hovergames
*/
#ifndef SRC_DRIVERS_UAVCANNODE_V1_UORB_CONVERTER_H_
#define SRC_DRIVERS_UAVCANNODE_V1_UORB_CONVERTER_H_
/* uORB */
#include <px4_platform_common/posix.h>
#include <uORB/uORB.h>
#include <uORB/topics/sensor_gps.h>
/* canard */
#include <canard.h>
#include <canard_dsdl.h>
/* monotonic timestamp */
#include "libcancl/time.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
void uorbConverterInit(CanardInstance *ins, int16_t *raw_uorb_port_id, int16_t *fix_port_id, int16_t *aux_port_id);
void uorbProcessSub(int timeout_msec);
#endif /* SRC_DRIVERS_UAVCANNODE_V1_UORB_CONVERTER_H_ */
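// ---------------------------------------------------------------------------
// Illustrative driver loop (not part of the header): how the converter is
// expected to be wired up from the node's main task. The subject-ID value and
// the 100 ms poll timeout are assumptions chosen for the example; a port-ID
// of -1 disables the corresponding publication, as checked in uorbProcessSub().
// ---------------------------------------------------------------------------
static int16_t gps_raw_port = 1234;                   // Hypothetical subject-ID.
static int16_t gps_fix_port = -1, gps_aux_port = -1;  // Disabled in this sketch.

static void exampleNodeTask(CanardInstance *ins)
{
    uorbConverterInit(ins, &gps_raw_port, &gps_fix_port, &gps_aux_port);

    for (;;) {
        uorbProcessSub(100);  // Poll sensor_gps and queue UAVCAN transfers.
    }
}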