uorb: compress format definitions

Reduces flash usage by ~16KB.

- compress all formats at build time into a single string
- then iteratively decompress at runtime using
  https://github.com/atomicobject/heatshrink
Beat Küng 2023-08-21 16:02:14 +02:00 committed by Thomas Stastny
parent 142e44c418
commit 1ad5a9de08
27 changed files with 1736 additions and 252 deletions
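
For orientation, a minimal sketch of the build-time side of this change, using the Python encoder bundled below (the sample payload and the sys.path entry are illustrative; the window/lookahead values match those chosen in the generator):

import sys
# Assumption: run from the PX4 source tree so the bundled encoder is importable.
sys.path.append('src/lib/heatshrink')
import heatshrink_encode

# Made-up fields payload; the real generator concatenates all message format records.
fields = b'uint64_t timestamp;float[3] position;bool armed;\0'
window_size = 8   # decoder side needs 2^8 = 256 bytes of window memory
lookahead = 4
compressed = heatshrink_encode.encode(fields, window_size, lookahead)
print(f'{len(fields)} -> {len(compressed)} bytes')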

.gitmodules vendored
View File

@ -72,3 +72,6 @@
path = src/modules/zenoh/zenoh-pico
url = https://github.com/px4/zenoh-pico
branch = pr-zubf-werror-fix
[submodule "src/lib/heatshrink/heatshrink"]
path = src/lib/heatshrink/heatshrink
url = https://github.com/PX4/heatshrink.git

View File

@ -25,6 +25,7 @@ exec find boards msg src platforms test \
-path src/lib/crypto/monocypher -prune -o \
-path src/lib/crypto/libtomcrypt -prune -o \
-path src/lib/crypto/libtommath -prune -o \
-path src/lib/heatshrink/heatshrink -prune -o \
-path src/modules/uxrce_dds_client/Micro-XRCE-DDS-Client -prune -o \
-path src/lib/cdrstream/cyclonedds -prune -o \
-path src/lib/cdrstream/rosidl -prune -o \

View File

@ -0,0 +1,232 @@
#!/usr/bin/env python3
#############################################################################
#
# Copyright (C) 2023 PX4 Pro Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#############################################################################
"""
Generates cpp source + header files with compressed uorb topic fields from json files
"""
import argparse
import json
import struct
from operator import itemgetter
import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../src/lib/heatshrink'))
import heatshrink_encode
def parse_json_files(json_files: [str]) -> dict:
"""Read list of json files into a dict"""
definitions = {}
for json_file in json_files:
with open(json_file, encoding='utf-8') as file_handle:
definition = json.load(file_handle)
assert definition['name'] not in definitions
definitions[definition['name']] = definition
definitions[definition['name']]['completed'] = False
return definitions
def get_ordered_list_by_dependency(name: str, definitions: dict) -> [str]:
"""Iterate dependency graph and create an ordered list"""
if definitions[name]['completed']:
return []
ret = []
# Get nested types first (DFS)
for dependency in definitions[name]['dependencies']:
ret.extend(get_ordered_list_by_dependency(dependency, definitions))
ret.append(name)
definitions[name]['completed'] = True
return ret
def get_field_definitions(names: [str], definitions: dict) -> (bytes, [str]):
"""Get byte array with all definitions"""
ret = bytes()
formats_list = []
for name in names:
# Format each record as '<# orb_ids><orb_id0...><# orb_id dependencies><orb_id_dependency0...><fields><null>'
assert len(definitions[name]['orb_ids']) < 255
assert len(definitions[name]['dependencies']) < 255
ret += struct.pack('<B', len(definitions[name]['orb_ids']))
for orb_id in definitions[name]['orb_ids']:
assert orb_id < (1 << 16)
ret += struct.pack('<H', orb_id)
# Dependencies
ret += struct.pack('<B', len(definitions[name]['dependencies']))
for dependent_message_name in definitions[name]['dependencies']:
# Get ORB ID by looking up the name in all definitions
dependent_orb_id_list = [definitions[k]['main_orb_id'] for k in definitions if
definitions[k]['name'] == dependent_message_name]
assert len(dependent_orb_id_list) == 1
orb_id = dependent_orb_id_list[0]
assert (1 << 16) > orb_id >= 0
ret += struct.pack('<H', orb_id)
ret += bytes(definitions[name]['fields'], 'latin1')
ret += b'\0'
formats_list.append(definitions[name]['fields'])
return ret, formats_list
def write_fields_to_cpp_file(file_name: str, compressed_fields: bytes):
fields_str = ', '.join(str(c) for c in compressed_fields)
with open(file_name, 'w') as file_handle:
file_handle.write('''
// Auto-generated from px4_generate_uorb_compressed_fields.py
#include <uORB/topics/uORBMessageFieldsGenerated.hpp>
namespace uORB {
static const uint8_t compressed_fields[] = {
{FIELDS}
};
const uint8_t* orb_compressed_message_formats()
{
return compressed_fields;
}
unsigned orb_compressed_message_formats_size()
{
return sizeof(compressed_fields) / sizeof(compressed_fields[0]);
}
} // namespace uORB
'''.replace('{FIELDS}', fields_str))
def c_encode(s, encoding='ascii'):
result = ''
for c in s:
if not (32 <= ord(c) < 127) or c in ('\\', '"'):
result += '\\%03o' % ord(c)
else:
result += c
return '"' + result + '"'
def write_fields_to_hpp_file(file_name: str, definitions: dict, window_length: int, lookahead_length: int,
format_list: [str]):
max_tokenized_field_length, max_tokenized_field_length_msg = max(
((len(definitions[k]['fields']), k) for k in definitions), key=itemgetter(0))
max_num_orb_ids = max(len(definitions[k]['orb_ids']) for k in definitions)
max_num_orb_id_dependencies = max(len(definitions[k]['dependencies']) for k in definitions)
with open(file_name, 'w') as file_handle:
file_handle.write('''
// Auto-generated from px4_generate_uorb_compressed_fields.py
#include <cstdint>
namespace uORB {
/**
* Get compressed string of all uorb message format definitions
*/
const uint8_t* orb_compressed_message_formats();
/**
* Get length of compressed message format definitions
*/
unsigned orb_compressed_message_formats_size();
static constexpr unsigned orb_tokenized_fields_max_length = {MAX_TOKENIZED_FIELD_LENGTH}; // {MAX_TOKENIZED_FIELD_LENGTH_MSG}
static constexpr unsigned orb_compressed_max_num_orb_ids = {MAX_NUM_ORB_IDS};
static constexpr unsigned orb_compressed_max_num_orb_id_dependencies = {MAX_NUM_ORB_ID_DEPENDENCIES};
static constexpr unsigned orb_compressed_heatshrink_window_length = {WINDOW_LENGTH};
static constexpr unsigned orb_compressed_heatshrink_lookahead_length = {LOOKAHEAD_LENGTH};
#define ORB_DECOMPRESSED_MESSAGE_FIELDS {{DECOMPRESSED_MESSAGE_FIELDS}}
} // namespace uORB
'''
.replace('{MAX_TOKENIZED_FIELD_LENGTH}', str(max_tokenized_field_length))
.replace('{MAX_TOKENIZED_FIELD_LENGTH_MSG}', max_tokenized_field_length_msg)
.replace('{MAX_NUM_ORB_IDS}', str(max_num_orb_ids))
.replace('{MAX_NUM_ORB_ID_DEPENDENCIES}', str(max_num_orb_id_dependencies))
.replace('{WINDOW_LENGTH}', str(window_length))
.replace('{LOOKAHEAD_LENGTH}', str(lookahead_length))
.replace('{DECOMPRESSED_MESSAGE_FIELDS}', ','.join(c_encode(x) for x in format_list))
)
def main():
parser = argparse.ArgumentParser(description='Generate compressed uorb topic fields')
parser.add_argument('-f', dest='file',
help="json input files",
nargs="+")
parser.add_argument('--source-output-file', dest='output_cpp',
help='cpp output file to generate')
parser.add_argument('--header-output-file', dest='output_hpp',
help='hpp output file to generate')
parser.add_argument('-v', '--verbose',
action='store_true',
help="verbose output")
args = parser.parse_args()
if args.file is not None:
definitions = parse_json_files(args.file)
# Get array of all field definitions
names = []
for definition in definitions:
names.extend(get_ordered_list_by_dependency(definitions[definition]['name'], definitions))
names.reverse() # Dependent definitions must be after
assert len(names) == len(definitions)
for definition in definitions: # sanity check
assert definitions[definition]['completed']
field_definitions, format_list = get_field_definitions(names, definitions)
# Compress
window_size = 8 # Larger value = better compression; memory requirement (for decompression): 2 ^ window_size
lookahead = 4
compressed_field_definitions = heatshrink_encode.encode(field_definitions, window_size, lookahead)
if args.verbose:
print(
f'Field definitions: size: {len(field_definitions)}, reduction from compression: {len(field_definitions) - len(compressed_field_definitions)}')
# Write cpp & hpp file
write_fields_to_cpp_file(args.output_cpp, compressed_field_definitions)
write_fields_to_hpp_file(args.output_hpp, definitions, window_size, lookahead, format_list)
if __name__ == "__main__":
main()
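
To make the packed record layout concrete, a rough worked example for a single hypothetical message (ORB IDs and field names are invented; in the real output the type names are single-byte tokens rather than spelled-out C types):

import struct

record  = struct.pack('<B', 1)    # number of orb_ids for this message
record += struct.pack('<H', 42)   # orb_id 0 (hypothetical)
record += struct.pack('<B', 1)    # number of orb_id dependencies (nested types)
record += struct.pack('<H', 7)    # dependency orb_id 0 (hypothetical)
record += b'uint64_t timestamp;float[3] position;'  # tokenized fields in reality
record += b'\0'                   # record terminator
# get_field_definitions() concatenates one such record per message, in dependency
# order, and the result is then heatshrink-compressed as a whole.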

View File

@ -70,9 +70,8 @@ __license__ = "BSD"
__email__ = "thomasgubler@gmail.com"
TEMPLATE_FILE = ['msg.h.em', 'msg.cpp.em', 'uorb_idl_header.h.em']
TOPICS_LIST_TEMPLATE_FILE = ['uORBTopics.hpp.em', 'uORBTopics.cpp.em']
OUTPUT_FILE_EXT = ['.h', '.cpp', '.h']
TEMPLATE_FILE = ['msg.h.em', 'msg.cpp.em', 'uorb_idl_header.h.em', 'msg.json.em']
TOPICS_LIST_TEMPLATE_FILE = ['uORBTopics.hpp.em', 'uORBTopics.cpp.em', None, None]
INCL_DEFAULT = ['std_msgs:./msg/std_msgs']
PACKAGE = 'px4'
TOPICS_TOKEN = '# TOPICS '
@ -105,7 +104,7 @@ def get_topics(filename):
return result
def generate_output_from_file(format_idx, filename, outputdir, package, templatedir, includepath):
def generate_output_from_file(format_idx, filename, outputdir, package, templatedir, includepath, all_topics):
"""
Converts a single .msg file to an uorb header/source file
"""
@ -155,6 +154,7 @@ def generate_output_from_file(format_idx, filename, outputdir, package, template
"msg_context": msg_context,
"spec": spec,
"topics": topics,
"all_topics": all_topics,
}
# Make sure output directory exists:
@ -162,10 +162,11 @@ def generate_output_from_file(format_idx, filename, outputdir, package, template
os.makedirs(outputdir)
template_file = os.path.join(templatedir, TEMPLATE_FILE[format_idx])
extension = os.path.splitext(os.path.splitext(TEMPLATE_FILE[format_idx])[0])[1]
if format_idx == 2:
output_file = os.path.join(outputdir, file_base_name + OUTPUT_FILE_EXT[format_idx])
output_file = os.path.join(outputdir, file_base_name + extension)
else:
output_file = os.path.join(outputdir, full_type_name_snake + OUTPUT_FILE_EXT[format_idx])
output_file = os.path.join(outputdir, full_type_name_snake + extension)
return generate_by_template(output_file, template_file, em_globals)
@ -195,17 +196,13 @@ def generate_by_template(output_file, template_file, em_globals):
return True
def generate_topics_list_file_from_files(files, outputdir, template_filename, templatedir):
def generate_topics_list_file_from_files(files, outputdir, template_filename, templatedir, all_topics):
# generate cpp file with topics list
filenames = []
for filename in [os.path.basename(p) for p in files if os.path.basename(p).endswith(".msg")]:
filenames.append(re.sub(r'(?<!^)(?=[A-Z])', '_', filename).lower())
topics = []
for msg_filename in files:
topics.extend(get_topics(msg_filename))
tl_globals = {"msgs": filenames, "topics": topics}
tl_globals = {"msgs": filenames, "all_topics": all_topics}
tl_template_file = os.path.join(templatedir, template_filename)
tl_out_file = os.path.join(outputdir, template_filename.replace(".em", ""))
@ -222,8 +219,9 @@ if __name__ == "__main__":
parser.add_argument('--headers', help='Generate header files', action='store_true')
parser.add_argument('--sources', help='Generate source files', action='store_true')
parser.add_argument('--uorb-idl-header', help='Generate uORB compatible idl header', action='store_true')
parser.add_argument('--json', help='Generate json files', action='store_true')
parser.add_argument('-f', dest='file',
help="files to convert (use only without -d)",
help="files to convert",
nargs="+")
parser.add_argument('-i', dest="include_paths",
help='Additional Include Paths', nargs="*",
@ -247,17 +245,21 @@ if __name__ == "__main__":
elif args.sources:
generate_idx = 1
elif args.uorb_idl_header:
for f in args.file:
print(f)
generate_output_from_file(2, f, args.outputdir, args.package, args.templatedir, INCL_DEFAULT)
exit(0)
generate_idx = 2
elif args.json:
generate_idx = 3
else:
print('Error: either --headers or --sources must be specified')
print('Error: either --headers, --sources or --json must be specified')
exit(-1)
if args.file is not None:
all_topics = []
for msg_filename in args.file:
all_topics.extend(get_topics(msg_filename))
all_topics.sort()
for f in args.file:
generate_output_from_file(generate_idx, f, args.outputdir, args.package, args.templatedir, INCL_DEFAULT)
generate_output_from_file(generate_idx, f, args.outputdir, args.package, args.templatedir, INCL_DEFAULT, all_topics)
# Generate topics list header and source file
if os.path.isfile(os.path.join(args.templatedir, TOPICS_LIST_TEMPLATE_FILE[generate_idx])):
generate_topics_list_file_from_files(args.file, args.outputdir, TOPICS_LIST_TEMPLATE_FILE[generate_idx], args.templatedir)
if TOPICS_LIST_TEMPLATE_FILE[generate_idx] is not None and os.path.isfile(os.path.join(args.templatedir, TOPICS_LIST_TEMPLATE_FILE[generate_idx])):
generate_topics_list_file_from_files(args.file, args.outputdir, TOPICS_LIST_TEMPLATE_FILE[generate_idx], args.templatedir, all_topics)

View File

@ -14,6 +14,7 @@
@# - spec (msggen.MsgSpec) Parsed specification of the .msg file
@# - search_path (dict) search paths for genmsg
@# - topics (List of String) topic names
@# - all_topics (List of String) all generated topic names (sorted)
@###############################################
/****************************************************************************
*
@ -59,7 +60,6 @@ uorb_struct = '%s_s'%name_snake_case
sorted_fields = sorted(spec.parsed_fields(), key=sizeof_field_type, reverse=True)
struct_size, padding_end_size = add_padding_bytes(sorted_fields, search_path)
topic_fields = ["%s %s" % (convert_type(field.type, True), field.name) for field in sorted_fields]
}@
#include <inttypes.h>
@ -72,12 +72,9 @@ topic_fields = ["%s %s" % (convert_type(field.type, True), field.name) for field
#include <lib/matrix/matrix/math.hpp>
#include <lib/mathlib/mathlib.h>
@# join all msg files in one line e.g: "float[3] position;float[3] velocity;bool armed"
@# This is used for the logger
constexpr char __orb_@(name_snake_case)_fields[] = "@( ";".join(topic_fields) );";
@[for topic in topics]@
ORB_DEFINE(@topic, struct @uorb_struct, @(struct_size-padding_end_size), __orb_@(name_snake_case)_fields, static_cast<orb_id_size_t>(ORB_ID::@topic));
static_assert(static_cast<orb_id_size_t>(ORB_ID::@topic) == @(all_topics.index(topic)), "ORB_ID index mismatch");
ORB_DEFINE(@topic, struct @uorb_struct, @(struct_size-padding_end_size), static_cast<orb_id_size_t>(ORB_ID::@topic));
@[end for]
void print_message(const orb_metadata *meta, const @uorb_struct& message)

View File

@ -0,0 +1,49 @@
@###############################################
@#
@# PX4 ROS compatible message source code
@# generation for C++
@#
@# EmPy template for generating <msg>.h files
@# Based on the original template for ROS
@#
@###############################################
@# Start of Template
@#
@# Context:
@# - file_name_in (String) Source file
@# - spec (msggen.MsgSpec) Parsed specification of the .msg file
@# - search_path (dict) search paths for genmsg
@# - topics (List of String) topic names
@# - all_topics (List of String) all generated topic names (sorted)
@###############################################
@{
import genmsg.msgs
import json
from px_generate_uorb_topic_helper import * # this is in Tools/
uorb_struct = '%s_s'%name_snake_case
sorted_fields = sorted(spec.parsed_fields(), key=sizeof_field_type, reverse=True)
struct_size, padding_end_size = add_padding_bytes(sorted_fields, search_path)
topic_fields = ["%s %s" % (convert_type(field.type, True), field.name) for field in sorted_fields]
dependencies = []
for field in spec.parsed_fields():
if not field.is_header:
type_name = field.type
# detect embedded types
sl_pos = type_name.find('/')
if sl_pos >= 0: # nested type
dependencies.append(field.base_type)
}@
{
@# join all msg files in one line e.g: "float[3] position;float[3] velocity;bool armed;"
"fields": @( json.dumps(bytearray(";".join(topic_fields)+";", 'utf-8').decode('unicode_escape')) ),
"orb_ids": @( json.dumps([ all_topics.index(topic) for topic in topics]) ),
"main_orb_id": @( all_topics.index(name_snake_case) if name_snake_case in all_topics else -1 ),
"dependencies": @( json.dumps(list(set(dependencies))) ),
"name": "@( spec.full_name )"
}
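
For illustration, the JSON record this template might emit for a small hypothetical message (all values invented; the escaped byte in "fields" stands for a single-byte type token, here 0x82 = int8_t as handled by orb_get_c_type()):

{
"fields": "\u0082 val;\u0082 flags;",
"orb_ids": [123],
"main_orb_id": 123,
"dependencies": [],
"name": "px4/MyExample"
}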

View File

@ -8,7 +8,7 @@
@#
@# Context:
@# - msgs (List) list of all msg files
@# - multi_topics (List) list of all multi-topic names
@# - all_topics (List) list of all topic names (sorted)
@###############################################
/****************************************************************************
*
@ -50,9 +50,7 @@ msg_names = list(set([mn.replace(".msg", "") for mn in msgs])) # set() filters d
msg_names.sort()
msgs_count = len(msg_names)
topic_names = list(set(topics)) # set() filters duplicates
topic_names.sort()
topics_count = len(topics)
topics_count = len(all_topics)
}@
@[for msg_name in msg_names]@
@ -60,8 +58,8 @@ topics_count = len(topics)
@[end for]
const constexpr struct orb_metadata *const uorb_topics_list[ORB_TOPICS_COUNT] = {
@[for idx, topic_name in enumerate(topic_names, 1)]@
ORB_ID(@(topic_name))@[if idx != len(topic_names)], @[end if]
@[for idx, topic_name in enumerate(all_topics, 1)]@
ORB_ID(@(topic_name))@[if idx != len(all_topics)], @[end if]
@[end for]
};

View File

@ -7,7 +7,8 @@
@# Start of Template
@#
@# Context:
@# - topics (List) list of all topic names
@# - msgs (List) list of all msg files
@# - all_topics (List) list of all topic names (sorted)
@###############################################
/****************************************************************************
*
@ -43,9 +44,7 @@
****************************************************************************/
@{
topics_count = len(topics)
topic_names_all = list(set(topics)) # set() filters duplicates
topic_names_all.sort()
topics_count = len(all_topics)
}@
#pragma once
@ -63,7 +62,7 @@ static constexpr size_t orb_topics_count() { return ORB_TOPICS_COUNT; }
extern const struct orb_metadata *const *orb_get_topics() __EXPORT;
enum class ORB_ID : orb_id_size_t {
@[for idx, topic_name in enumerate(topic_names_all)]@
@[for idx, topic_name in enumerate(all_topics)]@
@(topic_name) = @(idx),
@[end for]
INVALID

View File

@ -253,6 +253,7 @@ set(msg_source_out_path ${CMAKE_CURRENT_BINARY_DIR}/topics_sources)
set(uorb_headers)
set(uorb_sources)
set(uorb_ucdr_headers)
set(uorb_json_files)
foreach(msg_file ${msg_files})
get_filename_component(msg ${msg_file} NAME_WE)
@ -264,11 +265,9 @@ foreach(msg_file ${msg_files})
list(APPEND uorb_headers ${msg_out_path}/${msg}.h)
list(APPEND uorb_sources ${msg_source_out_path}/${msg}.cpp)
list(APPEND uorb_ucdr_headers ${ucdr_out_path}/${msg}.h)
list(APPEND uorb_json_files ${msg_source_out_path}/${msg}.json)
endforeach()
# set parent scope msg_files for other modules to consume (eg topic_listener)
set(msg_files ${msg_files} PARENT_SCOPE)
# Generate uORB headers
add_custom_command(
OUTPUT
@ -292,6 +291,44 @@ add_custom_command(
)
add_custom_target(uorb_headers DEPENDS ${uorb_headers})
add_custom_command(
OUTPUT
${uorb_json_files}
COMMAND ${PYTHON_EXECUTABLE} ${PX4_SOURCE_DIR}/Tools/msg/px_generate_uorb_topic_files.py
--json
-f ${msg_files}
-i ${CMAKE_CURRENT_SOURCE_DIR}
-o ${msg_source_out_path}
-e ${PX4_SOURCE_DIR}/Tools/msg/templates/uorb
DEPENDS
${msg_files}
${PX4_SOURCE_DIR}/Tools/msg/templates/uorb/msg.json.em
${PX4_SOURCE_DIR}/Tools/msg/px_generate_uorb_topic_files.py
${PX4_SOURCE_DIR}/Tools/msg/px_generate_uorb_topic_helper.py
COMMENT "Generating uORB json files"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
VERBATIM
)
add_custom_target(uorb_json_files DEPENDS ${uorb_json_files})
set(uorb_message_fields_cpp_file ${msg_source_out_path}/uORBMessageFieldsGenerated.cpp)
set(uorb_message_fields_header_file ${msg_out_path}/uORBMessageFieldsGenerated.hpp)
add_custom_command(
OUTPUT
${uorb_message_fields_cpp_file}
${uorb_message_fields_header_file}
COMMAND ${PYTHON_EXECUTABLE} ${PX4_SOURCE_DIR}/Tools/msg/px_generate_uorb_compressed_fields.py
-f ${uorb_json_files}
--source-output-file ${uorb_message_fields_cpp_file}
--header-output-file ${uorb_message_fields_header_file}
DEPENDS
uorb_json_files
${PX4_SOURCE_DIR}/Tools/msg/px_generate_uorb_compressed_fields.py
COMMENT "Generating uORB compressed fields"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
VERBATIM
)
# Generate microcdr headers
add_custom_command(
OUTPUT ${uorb_ucdr_headers}
@ -334,7 +371,7 @@ add_custom_command(
VERBATIM
)
add_library(uorb_msgs ${uorb_headers} ${msg_out_path}/uORBTopics.hpp ${uorb_sources} ${msg_source_out_path}/uORBTopics.cpp)
add_library(uorb_msgs ${uorb_headers} ${msg_out_path}/uORBTopics.hpp ${uorb_sources} ${msg_source_out_path}/uORBTopics.cpp ${uorb_message_fields_cpp_file})
target_link_libraries(uorb_msgs PRIVATE m)
add_dependencies(uorb_msgs prebuild_targets uorb_headers)

View File

@ -54,6 +54,8 @@ set(SRCS_COMMON
uORBCommon.hpp
uORBCommunicator.hpp
uORBManager.hpp
uORBMessageFields.cpp
uORBMessageFields.hpp
uORBUtils.cpp
uORBUtils.hpp
uORBDeviceMaster.hpp
@ -76,7 +78,7 @@ if (NOT DEFINED CONFIG_BUILD_FLAT AND "${PX4_PLATFORM}" MATCHES "nuttx")
${SRCS_COMMON}
${SRCS_KERNEL}
)
target_link_libraries(uORB_kernel PRIVATE cdev uorb_msgs nuttx_mm)
target_link_libraries(uORB_kernel PRIVATE cdev uorb_msgs nuttx_mm heatshrink)
target_compile_options(uORB_kernel PRIVATE ${MAX_CUSTOM_OPT_LEVEL} -D__KERNEL__)
# User side library in nuttx kernel/protected build
@ -102,9 +104,11 @@ else()
target_link_libraries(uORB PRIVATE cdev)
endif()
target_link_libraries(uORB PRIVATE uorb_msgs)
target_link_libraries(uORB PRIVATE uorb_msgs heatshrink)
target_compile_options(uORB PRIVATE ${MAX_CUSTOM_OPT_LEVEL})
if(PX4_TESTING)
add_subdirectory(uORB_tests)
endif()
px4_add_functional_gtest(SRC uORBMessageFieldsTest.cpp LINKLIBS uORB)

View File

@ -40,6 +40,7 @@
#include "uORBManager.hpp"
#include "uORBCommon.hpp"
#include "uORBMessageFields.hpp"
#include <lib/drivers/device/Device.hpp>
@ -196,7 +197,7 @@ int orb_get_interval(int handle, unsigned *interval)
const char *orb_get_c_type(unsigned char short_type)
{
// this matches with the uorb o_fields generator
// this matches the uorb type_map_short python data
switch (short_type) {
case 0x82: return "int8_t";
@ -239,25 +240,29 @@ void orb_print_message_internal(const orb_metadata *meta, const void *data, bool
const uint8_t *data_ptr = (const uint8_t *)data;
int data_offset = 0;
for (int format_idx = 0; meta->o_fields[format_idx] != 0;) {
const char *end_field = strchr(meta->o_fields + format_idx, ';');
// Find message format
char format_buffer[128];
uORB::MessageFormatReader format_reader(format_buffer, sizeof(format_buffer));
if (!end_field) {
PX4_ERR("Format error in %s", meta->o_fields);
return;
}
if (!format_reader.readUntilFormat(meta->o_id)) {
PX4_ERR("Failed to get uorb format");
return;
}
const char *c_type = orb_get_c_type(meta->o_fields[format_idx]);
const int end_field_idx = end_field - meta->o_fields;
int field_length = 0;
while (format_reader.readNextField(field_length)) {
const char *c_type = orb_get_c_type(format_buffer[0]);
int array_idx = -1;
int field_name_idx = -1;
for (int field_idx = format_idx; field_idx != end_field_idx; ++field_idx) {
if (meta->o_fields[field_idx] == '[') {
for (int field_idx = 0; field_idx < field_length; ++field_idx) {
if (format_buffer[field_idx] == '[') {
array_idx = field_idx + 1;
} else if (meta->o_fields[field_idx] == ' ') {
} else if (format_buffer[field_idx] == ' ') {
field_name_idx = field_idx + 1;
break;
}
@ -266,19 +271,10 @@ void orb_print_message_internal(const orb_metadata *meta, const void *data, bool
int array_size = 1;
if (array_idx >= 0) {
array_size = strtol(meta->o_fields + array_idx, nullptr, 10);
array_size = strtol(format_buffer + array_idx, nullptr, 10);
}
char field_name[80];
size_t field_name_len = end_field_idx - field_name_idx;
if (field_name_len >= sizeof(field_name)) {
PX4_ERR("field name too long %s (max: %u)", meta->o_fields, (unsigned)sizeof(field_name));
return;
}
memcpy(field_name, meta->o_fields + field_name_idx, field_name_len);
field_name[field_name_len] = '\0';
const char *field_name = format_buffer + field_name_idx;
if (c_type) { // built-in type
bool dont_print = false;
@ -458,17 +454,10 @@ void orb_print_message_internal(const orb_metadata *meta, const void *data, bool
} else {
// extract the topic name
char topic_name[80];
const size_t topic_name_len = array_size > 1 ? array_idx - format_idx - 1 : field_name_idx - format_idx - 1;
if (topic_name_len >= sizeof(topic_name)) {
PX4_ERR("topic name too long in %s (max: %u)", meta->o_name, (unsigned)sizeof(topic_name));
return;
}
memcpy(topic_name, meta->o_fields + format_idx, topic_name_len);
topic_name[topic_name_len] = '\0';
// Get the topic name
const size_t topic_name_len = array_size > 1 ? array_idx - 1 : field_name_idx - 1;
format_buffer[topic_name_len] = '\0';
const char *topic_name = format_buffer;
// find the metadata
const orb_metadata *const *topics = orb_get_topics();
@ -499,7 +488,5 @@ void orb_print_message_internal(const orb_metadata *meta, const void *data, bool
data_offset += found_topic->o_size;
}
}
format_idx = end_field_idx + 1;
}
}

View File

@ -51,7 +51,6 @@ struct orb_metadata {
const char *o_name; /**< unique object name */
const uint16_t o_size; /**< object size */
const uint16_t o_size_no_padding; /**< object size w/o padding at the end (for logger) */
const char *o_fields; /**< semicolon separated list of fields (with type) */
orb_id_size_t o_id; /**< ORB_ID enum */
};
@ -100,15 +99,13 @@ typedef const struct orb_metadata *orb_id_t;
* @param _name The name of the topic.
* @param _struct The structure the topic provides.
* @param _size_no_padding Struct size w/o padding at the end
* @param _fields All fields in a semicolon separated list e.g: "float[3] position;bool armed"
* @param _orb_id_enum ORB ID enum e.g.: ORB_ID::vehicle_status
*/
#define ORB_DEFINE(_name, _struct, _size_no_padding, _fields, _orb_id_enum) \
#define ORB_DEFINE(_name, _struct, _size_no_padding, _orb_id_enum) \
const struct orb_metadata __orb_##_name = { \
#_name, \
sizeof(_struct), \
_size_no_padding, \
_fields, \
_orb_id_enum \
}; struct hack
@ -236,7 +233,7 @@ extern int orb_set_interval(int handle, unsigned interval) __EXPORT;
extern int orb_get_interval(int handle, unsigned *interval) __EXPORT;
/**
* Returns the C type string from a short type in o_fields metadata, or nullptr
* Returns the C type string from a short type in message fields metadata, or nullptr
* if not a short type
*/
const char *orb_get_c_type(unsigned char short_type);
@ -248,7 +245,6 @@ const char *orb_get_c_type(unsigned char short_type);
*/
void orb_print_message_internal(const struct orb_metadata *meta, const void *data, bool print_topic_name);
__END_DECLS
/* Diverse uORB header defines */ //XXX: move to better location

View File

@ -0,0 +1,342 @@
/****************************************************************************
*
* Copyright (c) 2023 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#include "uORBMessageFields.hpp"
#include <px4_platform_common/log.h>
namespace uORB
{
MessageFormatReader::State MessageFormatReader::readMore()
{
if (_state == State::Complete || _state == State::Failure) {
return _state;
}
if (_buffer_length == _buffer_capacity) {
_state = State::Failure;
PX4_ERR("buffer too small");
return _state;
}
const uint8_t *compressed_formats = orb_compressed_message_formats();
const unsigned compressed_formats_size = orb_compressed_message_formats_size();
if (_buffer_length == 0 && _compressed_formats_idx == compressed_formats_size) {
_state = State::Complete;
return _state;
}
const unsigned max_num_iterations = 5; // Safeguard, we're not expected to do more than a few iterations
for (unsigned iteration = 0; iteration < max_num_iterations; ++iteration) {
switch (_state) {
case State::ReadOrbIDs: {
int num_orb_ids = _buffer[0];
const unsigned orb_ids_size = 1 + num_orb_ids * sizeof(orb_id_size_t);
if (_buffer_length > orb_ids_size) {
int num_dependent_orb_ids = _buffer[orb_ids_size];
const unsigned orb_ids_dependent_size = 1 + num_dependent_orb_ids * sizeof(orb_id_size_t);
if (_buffer_length >= orb_ids_size + orb_ids_dependent_size) {
orb_id_size_t orb_id;
_state = State::ReadingFormat;
_format_length = 0;
_orb_ids.clear();
for (int i = 0; i < num_orb_ids; ++i) {
memcpy(&orb_id, &_buffer[1 + sizeof(orb_id_size_t) * i], sizeof(orb_id_size_t));
_orb_ids.push_back(orb_id);
}
_orb_ids_dependencies.clear();
for (int i = 0; i < num_dependent_orb_ids; ++i) {
memcpy(&orb_id, &_buffer[orb_ids_size + 1 + sizeof(orb_id_size_t) * i],
sizeof(orb_id_size_t));
_orb_ids_dependencies.push_back(orb_id);
}
memmove(_buffer, _buffer + orb_ids_size + orb_ids_dependent_size,
_buffer_length - orb_ids_size - orb_ids_dependent_size);
_buffer_length -= orb_ids_size + orb_ids_dependent_size;
return State::ReadOrbIDs;
}
}
if (_buffer_length == _buffer_capacity) {
_state = State::Failure;
PX4_ERR("buffer too small");
return _state;
}
break;
}
case State::ReadingFormat: {
const bool got_new_data = _format_length < _buffer_length;
for (; _format_length < _buffer_length; ++_format_length) {
if (_buffer[_format_length] == '\0') {
_state = State::FormatComplete;
return _state;
}
}
if (got_new_data) {
return _state;
}
}
break;
case State::FormatComplete:
if (_format_length != 0) {
PX4_ERR("Invalid API calls"); // Missing call to clearFormatFromBuffer or clearFormatAndRestoreLeftover
_state = State::Failure;
return _state;
}
_state = State::ReadOrbIDs;
break;
case State::Failure:
case State::Complete:
return _state;
}
// Decompress more data
size_t count = 0;
if (heatshrink_decoder_sink(&_hsd, &compressed_formats[_compressed_formats_idx],
compressed_formats_size - _compressed_formats_idx, &count) < 0) {
_state = State::Failure;
return _state;
}
_compressed_formats_idx += count;
if (_compressed_formats_idx == compressed_formats_size) {
const HSD_finish_res fres = heatshrink_decoder_finish(&_hsd);
if (fres != HSDR_FINISH_MORE && fres != HSDR_FINISH_DONE) {
_state = State::Failure;
return _state;
}
}
const HSD_poll_res pres = heatshrink_decoder_poll(&_hsd, reinterpret_cast<uint8_t *>(&_buffer[_buffer_length]),
_buffer_capacity - _buffer_length, &count);
_buffer_length += count;
if (HSDR_POLL_EMPTY != pres && HSDR_POLL_MORE != pres) {
_state = State::Failure;
return _state;
}
if (_compressed_formats_idx == compressed_formats_size) {
const HSD_finish_res fres = heatshrink_decoder_finish(&_hsd);
if (HSDR_FINISH_DONE != fres && HSDR_FINISH_MORE != fres) {
_state = State::Failure;
return _state;
}
}
}
// Not expected to get here
PX4_ERR("logic error");
_state = State::Failure;
return _state;
}
void MessageFormatReader::clearFormatFromBuffer()
{
if (_state == State::FormatComplete) {
++_format_length; // Include null char
memmove(_buffer, _buffer + _format_length, _buffer_length - _format_length);
_buffer_length -= _format_length;
} else {
// Full buffer is occupied with format
_buffer_length = 0;
}
_format_length = 0;
}
int MessageFormatReader::expandMessageFormat(char *format, unsigned len, unsigned buf_len)
{
++len; // Include null char
int format_idx = 0;
while (format[format_idx] != 0) {
const char *c_type = orb_get_c_type(format[format_idx]);
if (c_type) {
// Replace 1 char type with expanded c_type
const int c_type_len = (int)strlen(c_type);
if (len + c_type_len - 1 > buf_len) {
return -1;
}
memmove(format + format_idx + c_type_len, format + format_idx + 1, len - format_idx - 1);
memcpy(format + format_idx, c_type, c_type_len);
format_idx += c_type_len - 1;
len += c_type_len - 1;
}
// Go to next field
const char *end_field = strchr(format + format_idx, ';');
if (!end_field) {
PX4_ERR("Format error in %s", format);
return -1;
}
format_idx = (int)(end_field - format + 1);
}
if (format_idx + 1 != (int)len) {
PX4_ERR("logic error");
return -1;
}
return format_idx;
}
bool MessageFormatReader::readUntilFormat(orb_id_size_t orb_id)
{
bool done = false;
bool found_format = false;
while (!done && !found_format) {
switch (readMore()) {
case State::ReadOrbIDs:
for (const orb_id_size_t current_orb_id : orbIDs()) {
if (current_orb_id == orb_id) {
found_format = true;
}
}
break;
case State::ReadingFormat:
case State::FormatComplete:
clearFormatFromBuffer();
break;
case State::Complete:
case State::Failure:
done = true;
break;
default:
break;
}
}
return found_format;
}
bool MessageFormatReader::readNextField(int &field_length)
{
if (field_length > 0) {
// Move left-over part to beginning
++field_length; // include null
memmove(_buffer, _buffer + field_length, _buffer_length - field_length);
_buffer_length -= field_length;
_format_length -= field_length;
}
auto findFieldEnd = [&]() {
// Find ';'
bool found = false;
for (field_length = 0; field_length < (int)_format_length; ++field_length) {
if (_buffer[field_length] == ';') {
_buffer[field_length] = '\0';
found = true;
break;
}
}
return found;
};
// We might still have a field in the buffer
if (findFieldEnd()) {
return true;
}
bool done = false;
bool ret = false;
while (!done) {
switch (readMore()) {
case State::ReadingFormat:
if (findFieldEnd()) {
ret = true;
done = true;
}
break;
case State::FormatComplete: {
ret = findFieldEnd(); // Expected to return true here
done = true;
break;
}
case State::ReadOrbIDs: // Arrived at the next format -> we're done
case State::Complete:
case State::Failure:
done = true;
break;
}
}
return ret;
}
} // namespace uORB

View File

@ -0,0 +1,157 @@
/****************************************************************************
*
* Copyright (c) 2023 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#pragma once
#include <uORB/topics/uORBMessageFieldsGenerated.hpp>
#include <uORB/uORB.h>
#include <containers/Array.hpp>
#include <cstring>
#define HEATSHRINK_DYNAMIC_ALLOC 0
#include <lib/heatshrink/heatshrink/heatshrink_decoder.h>
namespace uORB
{
class MessageFormatReader
{
public:
enum class State {
ReadOrbIDs,
ReadingFormat,
FormatComplete,
Failure,
Complete
};
MessageFormatReader(char *buffer, unsigned buffer_capacity)
: _buffer(buffer), _buffer_capacity(buffer_capacity)
{
heatshrink_decoder_reset(&_hsd);
static_assert(orb_compressed_heatshrink_window_length == HEATSHRINK_STATIC_WINDOW_BITS, "window length mismatch");
static_assert(orb_compressed_heatshrink_lookahead_length == HEATSHRINK_STATIC_LOOKAHEAD_BITS,
"lookahead length mismatch");
_buffer[0] = 0;
}
/**
* Read and decompress more data into the given buffer (from the constructor).
* Call iteratively until completed or a failure happens.
* @return current state
*/
State readMore();
/**
* Read until the start of a format given an ORB ID
* @return true on success
*/
bool readUntilFormat(orb_id_size_t orb_id);
/**
* Iteratively read fields for the current format
* @param field_length [in,out] field length, set to 0 initially
* @return true while there is a field
*/
bool readNextField(int &field_length);
/**
* Current length of the buffer
*/
uint32_t bufferLength() const { return _buffer_length; }
/**
* Clear the buffer during ReadingFormat (if it does not need to be accumulated) or FormatComplete.
* After FormatComplete either this or clearFormatAndRestoreLeftover must be called.
*/
void clearFormatFromBuffer();
/**
* When FormatComplete, this can be called to move the remaining part after the format to the end of the buffer,
* allowing the buffer to be modified.
* @return length of the left-over part.
*/
unsigned moveLeftoverToBufferEnd()
{
_buffer_length -= _format_length + 1;
memmove(_buffer + _buffer_capacity - _buffer_length, _buffer + _format_length + 1, _buffer_length);
return _buffer_length;
}
/**
* After calling moveLeftoverToBufferEnd(), this must be called.
*/
void clearFormatAndRestoreLeftover()
{
memmove(_buffer, _buffer + _buffer_capacity - _buffer_length, _buffer_length);
_format_length = 0;
}
/**
* Get the (partial if ReadingFormat or complete if FormatComplete) format length in the buffer
*/
unsigned formatLength() const { return _format_length; }
/**
* In ReadOrbIDs, ReadingFormat or FormatComplete states, this returns the orb IDs associated with the format.
*/
const px4::Array<orb_id_size_t, orb_compressed_max_num_orb_ids> &orbIDs() const { return _orb_ids; }
/**
* In ReadOrbIDs, ReadingFormat or FormatComplete states, this returns the dependent orb IDs associated with the
* format (for nested format definitions).
*/
const px4::Array<orb_id_size_t, orb_compressed_max_num_orb_id_dependencies> &orbIDsDependencies() const { return _orb_ids_dependencies; }
/**
* Expand a tokenized format (after decompressing it)
* @param format tokenized format, expanded in-place
* @param len Length of the format, format[len] == '\0' must hold
* @param buf_len total size of the format buffer. Must be large enough for the expanded format.
* @return expanded format length, or <0 on error
*/
static int expandMessageFormat(char *format, unsigned len, unsigned buf_len);
private:
State _state{State::ReadOrbIDs};
px4::Array<orb_id_size_t, orb_compressed_max_num_orb_ids> _orb_ids;
px4::Array<orb_id_size_t, orb_compressed_max_num_orb_id_dependencies> _orb_ids_dependencies;
unsigned _compressed_formats_idx{0};
char *_buffer{nullptr};
const unsigned _buffer_capacity;
uint32_t _buffer_length{0};
unsigned _format_length{0};
heatshrink_decoder _hsd;
};
} // namespace uORB
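
To illustrate what expandMessageFormat() does with a tokenized field list, a rough Python sketch (only the 0x82 -> int8_t token is taken from this diff; the full token table lives in the message generator, and the real implementation expands the buffer in place in C++):

# Sketch: expand the single-byte type token at the start of each field into a C type
# name, leaving nested-topic fields (which spell out the topic name) untouched.
token_map = {0x82: 'int8_t'}  # only this mapping is visible in the diff

def expand_tokenized(fields: bytes) -> str:
    out = []
    for field in fields.split(b';'):
        if not field:
            continue
        token, rest = field[0], field[1:].decode()
        out.append(token_map.get(token, chr(token)) + rest + ';')
    return ''.join(out)

print(expand_tokenized(b'\x82 val;\x82 flags;'))  # -> int8_t val;int8_t flags;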

View File

@ -0,0 +1,234 @@
/****************************************************************************
*
* Copyright (C) 2023 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#include "uORBMessageFields.hpp"
#include <gtest/gtest.h>
#include <containers/Bitset.hpp>
#include <px4_platform_common/param.h>
#include <uORB/topics/uORBTopics.hpp>
#include <string>
// To run: make tests TESTFILTER=uORBMessageFields
class uORBMessageFieldsTest : public ::testing::Test
{
public:
void SetUp() override
{
param_control_autosave(false);
}
};
TEST_F(uORBMessageFieldsTest, decompressed_formats_match)
{
char buffer[1500];
uORB::MessageFormatReader format_reader(buffer, sizeof(buffer));
px4::Bitset<ORB_TOPICS_COUNT> formats_found;
px4::Bitset<ORB_TOPICS_COUNT> dependencies;
static const char *all_formats[] = ORB_DECOMPRESSED_MESSAGE_FIELDS;
int format_idx = 0;
bool done = false;
while (!done) {
switch (format_reader.readMore()) {
case uORB::MessageFormatReader::State::FormatComplete: {
const unsigned format_length = format_reader.formatLength();
EXPECT_GT(format_length, 0);
// Move the left-over (the part after the format if any) to the end of the buffer
const unsigned leftover_length = format_reader.moveLeftoverToBufferEnd();
for (const orb_id_size_t orb_id : format_reader.orbIDs()) {
// Ensure each orb_id is set only once
EXPECT_FALSE(formats_found[orb_id]);
formats_found.set(orb_id);
dependencies.set(orb_id, false); // Clear dependency
}
for (const orb_id_size_t orb_id : format_reader.orbIDsDependencies()) {
dependencies.set(orb_id);
}
// Compare format
ASSERT_LT(format_idx, sizeof(all_formats) / sizeof(all_formats[0]));
const std::string format(buffer, format_length);
const std::string format_expected(all_formats[format_idx]);
EXPECT_EQ(format, format_expected);
const int ret = uORB::MessageFormatReader::expandMessageFormat(buffer, format_length,
sizeof(buffer) - leftover_length);
EXPECT_GE(ret, 0);
++format_idx;
// Move left-over back
format_reader.clearFormatAndRestoreLeftover();
break;
}
break;
case uORB::MessageFormatReader::State::Failure:
PX4_ERR("Failed to read formats");
done = true;
ASSERT_FALSE(true);
break;
case uORB::MessageFormatReader::State::Complete:
done = true;
break;
default:
break;
}
}
// Check that all formats are found
for (size_t i = 0; i < formats_found.size(); ++i) {
EXPECT_TRUE(formats_found[i]);
}
// Expect dependencies to be cleared. If this is not the case, the format ordering is incorrect.
EXPECT_EQ(dependencies.count(), 0);
}
TEST_F(uORBMessageFieldsTest, decompress_formats_iterative)
{
char buffer[64];
uORB::MessageFormatReader format_reader(buffer, sizeof(buffer));
px4::Bitset<ORB_TOPICS_COUNT> formats_found;
static const char *all_formats[] = ORB_DECOMPRESSED_MESSAGE_FIELDS;
int format_idx = 0;
std::string current_format;
bool done = false;
while (!done) {
switch (format_reader.readMore()) {
case uORB::MessageFormatReader::State::ReadingFormat:
current_format += std::string(buffer, format_reader.formatLength());
format_reader.clearFormatFromBuffer();
break;
case uORB::MessageFormatReader::State::FormatComplete: {
current_format += std::string(buffer, format_reader.formatLength());
format_reader.clearFormatFromBuffer();
EXPECT_FALSE(current_format.empty());
for (const orb_id_size_t orb_id : format_reader.orbIDs()) {
// Ensure each orb_id is set only once
EXPECT_FALSE(formats_found[orb_id]);
formats_found.set(orb_id);
}
// Compare format
ASSERT_LT(format_idx, sizeof(all_formats) / sizeof(all_formats[0]));
const std::string format_expected(all_formats[format_idx]);
EXPECT_EQ(current_format, format_expected);
++format_idx;
current_format.clear();
break;
}
break;
case uORB::MessageFormatReader::State::Failure:
PX4_ERR("Failed to read formats");
done = true;
ASSERT_FALSE(true);
break;
case uORB::MessageFormatReader::State::Complete:
done = true;
break;
default:
break;
}
}
// Check that all formats are found
for (size_t i = 0; i < formats_found.size(); ++i) {
EXPECT_TRUE(formats_found[i]);
}
}
TEST_F(uORBMessageFieldsTest, decompress_formats_buffer_too_short)
{
char buffer[64];
static_assert(uORB::orb_tokenized_fields_max_length > sizeof(buffer), "Test expects smaller buffer");
uORB::MessageFormatReader format_reader(buffer, sizeof(buffer));
bool done = false;
while (!done) {
switch (format_reader.readMore()) {
case uORB::MessageFormatReader::State::Failure:
case uORB::MessageFormatReader::State::Complete:
done = true;
break;
default:
break;
}
}
EXPECT_EQ(format_reader.readMore(), uORB::MessageFormatReader::State::Failure);
}
TEST_F(uORBMessageFieldsTest, decompress_specific_format)
{
char format[512];
char buffer[128];
uORB::MessageFormatReader format_reader(buffer, sizeof(buffer));
ASSERT_TRUE(format_reader.readUntilFormat((orb_id_size_t)ORB_ID::orb_test));
int field_length = 0;
int format_length = 0;
while (format_reader.readNextField(field_length)) {
format_length += snprintf(format + format_length, sizeof(format) - format_length - 1, "%s;", buffer);
}
ASSERT_GT(uORB::MessageFormatReader::expandMessageFormat(format, format_length, sizeof(format)), 0);
const std::string expected_format = "uint64_t timestamp;int32_t val;uint8_t[4] _padding0;";
ASSERT_EQ(expected_format, format);
}

View File

@ -46,7 +46,6 @@ struct orb_metadata {
const char *o_name; /**< unique object name */
const uint16_t o_size; /**< object size */
const uint16_t o_size_no_padding; /**< object size w/o padding at the end (for logger) */
const char *o_fields; /**< semicolon separated list of fields (with type) */
uint8_t o_id; /**< ORB_ID enum */
};

View File

@ -99,6 +99,12 @@ public:
const T *begin() const { return &_items[0]; }
const T *end() const { return &_items[_size]; }
void clear()
{
_size = 0;
_overflow = false;
}
private:
T _items[N];
size_t _size{0};

View File

@ -51,6 +51,7 @@ add_subdirectory(dataman_client EXCLUDE_FROM_ALL)
add_subdirectory(drivers EXCLUDE_FROM_ALL)
add_subdirectory(field_sensor_bias_estimator EXCLUDE_FROM_ALL)
add_subdirectory(geo EXCLUDE_FROM_ALL)
add_subdirectory(heatshrink EXCLUDE_FROM_ALL)
add_subdirectory(hysteresis EXCLUDE_FROM_ALL)
add_subdirectory(l1 EXCLUDE_FROM_ALL)
add_subdirectory(led EXCLUDE_FROM_ALL)

View File

@ -0,0 +1,43 @@
############################################################################
#
# Copyright (c) 2023 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
px4_add_git_submodule(TARGET git_heatshrink PATH heatshrink)
px4_add_library(heatshrink
heatshrink/heatshrink_decoder.c
)
target_compile_options(heatshrink PRIVATE
${MAX_CUSTOM_OPT_LEVEL}
-DHEATSHRINK_DYNAMIC_ALLOC=0)

@ -0,0 +1 @@
Subproject commit 052e6de72f67f1777198bce98f3de62f7f3c16a0

View File

@ -0,0 +1,422 @@
import ctypes
from enum import Enum
# Note: this implementation directly follows the heatshrink_encoder.c code
# (it's neither expected to be very efficient, nor is it pythonic)
# Enum
class HSE_state(ctypes.c_int):
HSES_NOT_FULL = 0
HSES_FILLED = 1
HSES_SEARCH = 2
HSES_YIELD_TAG_BIT = 3
HSES_YIELD_LITERAL = 4
HSES_YIELD_BR_INDEX = 5
HSES_YIELD_BR_LENGTH = 6
HSES_SAVE_BACKLOG = 7
HSES_FLUSH_BITS = 8
HSES_DONE = 9
# Constants
FLAG_IS_FINISHING = 0x01
MATCH_NOT_FOUND = 0xFFFF
HEATSHRINK_LITERAL_MARKER = 0x01
HEATSHRINK_BACKREF_MARKER = 0x00
# Structs
class output_info(ctypes.Structure):
_fields_ = [
("buf", ctypes.POINTER(ctypes.c_uint8)),
("buf_size", ctypes.c_size_t),
("output_size", ctypes.POINTER(ctypes.c_size_t))
]
# Functions
def add_tag_bit(hse, oi, tag):
push_bits(hse, 1, tag, oi)
def push_bits(hse, count, bits, oi):
assert count <= 8
current_byte = ctypes.c_uint8(hse.current_byte)
bit_index = ctypes.c_uint8(hse.bit_index)
for i in range(count - 1, -1, -1):
bit = bits & (1 << i)
if bit:
current_byte.value |= bit_index.value
bit_index.value >>= 1
if bit_index.value == 0:
bit_index.value = 0x80
oi.buf[oi.output_size[0]] = current_byte.value
oi.output_size[0] += 1
current_byte.value = 0
hse.current_byte = current_byte.value
hse.bit_index = bit_index.value
def push_literal_byte(hse, oi):
processed_offset = hse.match_scan_index - 1
input_offset = get_input_offset(hse) + processed_offset
c = hse.buffer[input_offset]
push_bits(hse, 8, c, oi)
# Define necessary structures and enums
class heatshrink_encoder(ctypes.Structure):
_fields_ = [
("input_size", ctypes.c_uint16),
("match_scan_index", ctypes.c_uint16),
("match_length", ctypes.c_uint16),
("match_pos", ctypes.c_uint16),
("outgoing_bits", ctypes.c_uint16),
("outgoing_bits_count", ctypes.c_uint8),
("flags", ctypes.c_uint8),
("state", ctypes.c_uint8),
("current_byte", ctypes.c_uint8),
("bit_index", ctypes.c_uint8),
("buffer", ctypes.POINTER(ctypes.c_uint8)),
]
def __init__(self, window_size=8, lookahead_size=4):
super().__init__()
self.window_size = window_size
self.lookahead_size = lookahead_size
self.search_index = hs_index(window_size)
self.buffer = (ctypes.c_uint8 * (2 << window_size))()
self.bit_index = 0x80
class hs_index(ctypes.Structure):
_fields_ = [("index", ctypes.POINTER(ctypes.c_int16))]
def __init__(self, window_size):
super().__init__()
self.index = (ctypes.c_int16 * (2 << window_size))()
class HSE_sink_res(Enum):
HSER_SINK_OK = 0
HSER_SINK_ERROR_NULL = -1
HSER_SINK_ERROR_MISUSE = -2
class HSE_poll_res(Enum):
HSER_POLL_EMPTY = 0
HSER_POLL_MORE = 1
HSER_POLL_ERROR_NULL = -1
HSER_POLL_ERROR_MISUSE = -2
class HSE_finish_res(Enum):
HSER_FINISH_DONE = 0
HSER_FINISH_MORE = 1
HSER_FINISH_ERROR_NULL = -1
def is_finishing(hse):
return hse.flags & FLAG_IS_FINISHING
def can_take_byte(oi):
return oi.output_size[0] < oi.buf_size
def get_input_buffer_size(hse):
return 1 << hse.window_size
def get_lookahead_size(hse):
return 1 << hse.lookahead_size
def get_input_offset(hse):
return get_input_buffer_size(hse)
def heatshrink_encoder_sink(hse, in_buf, size, input_size):
if hse is None or in_buf is None or input_size is None:
return HSE_sink_res.HSER_SINK_ERROR_NULL
if is_finishing(hse):
return HSE_sink_res.HSER_SINK_ERROR_MISUSE
if hse.state != HSE_state.HSES_NOT_FULL:
return HSE_sink_res.HSER_SINK_ERROR_MISUSE
write_offset = get_input_offset(hse) + hse.input_size
ibs = get_input_buffer_size(hse)
rem = ibs - hse.input_size
cp_sz = min(rem, size)
for i in range(cp_sz):
hse.buffer[write_offset + i] = in_buf[i]
input_size.value = cp_sz
hse.input_size += cp_sz
if cp_sz == rem:
hse.state = HSE_state.HSES_FILLED
return HSE_sink_res.HSER_SINK_OK
def do_indexing(hse):
# Build an index array I that contains flattened linked lists
# for the previous instances of every byte in the buffer.
hsi = hse.search_index
last = [0xffff] * 256
buf = hse.buffer
index = hsi.index
input_offset = get_input_offset(hse)
end = input_offset + hse.input_size
for i in range(0, end):
v = buf[i]
lv = last[v]
index[i] = lv
last[v] = i
def heatshrink_encoder_poll(hse, out_buf, out_buf_size, output_size):
if hse is None or out_buf is None or output_size is None:
return HSE_poll_res.HSER_POLL_ERROR_NULL
if out_buf_size == 0:
return HSE_poll_res.HSER_POLL_ERROR_MISUSE
output_size[0] = 0
oi = output_info()
oi.buf = out_buf
oi.buf_size = out_buf_size
oi.output_size = output_size
while True:
in_state = hse.state
if in_state == HSE_state.HSES_NOT_FULL:
return HSE_poll_res.HSER_POLL_EMPTY
elif in_state == HSE_state.HSES_DONE:
return HSE_poll_res.HSER_POLL_EMPTY
elif in_state == HSE_state.HSES_FILLED:
do_indexing(hse)
hse.state = HSE_state.HSES_SEARCH
elif in_state == HSE_state.HSES_SEARCH:
hse.state = st_step_search(hse)
elif in_state == HSE_state.HSES_YIELD_TAG_BIT:
hse.state = st_yield_tag_bit(hse, oi)
elif in_state == HSE_state.HSES_YIELD_LITERAL:
hse.state = st_yield_literal(hse, oi)
elif in_state == HSE_state.HSES_YIELD_BR_INDEX:
hse.state = st_yield_br_index(hse, oi)
elif in_state == HSE_state.HSES_YIELD_BR_LENGTH:
hse.state = st_yield_br_length(hse, oi)
elif in_state == HSE_state.HSES_SAVE_BACKLOG:
hse.state = st_save_backlog(hse)
elif in_state == HSE_state.HSES_FLUSH_BITS:
hse.state = st_flush_bit_buffer(hse, oi)
else:
return HSE_poll_res.HSER_POLL_ERROR_MISUSE
if hse.state == in_state:
if oi.output_size[0] == oi.buf_size:
return HSE_poll_res.HSER_POLL_MORE
def heatshrink_encoder_finish(hse):
hse.flags |= FLAG_IS_FINISHING
if hse.state == HSE_state.HSES_NOT_FULL:
hse.state = HSE_state.HSES_FILLED
if hse.state == HSE_state.HSES_DONE:
return HSE_finish_res.HSER_FINISH_DONE
return HSE_finish_res.HSER_FINISH_MORE
def st_step_search(hse):
window_length = get_input_buffer_size(hse)
lookahead_sz = get_lookahead_size(hse)
msi = hse.match_scan_index
fin = is_finishing(hse)
if msi > hse.input_size - (1 if fin else lookahead_sz):
return HSE_state.HSES_FLUSH_BITS if fin else HSE_state.HSES_SAVE_BACKLOG
input_offset = get_input_offset(hse)
end = input_offset + msi
start = end - window_length
max_possible = lookahead_sz if hse.input_size - msi >= lookahead_sz else hse.input_size - msi
match_pos, match_length = find_longest_match(hse, start, end, max_possible)
if match_pos == MATCH_NOT_FOUND:
hse.match_scan_index += 1
hse.match_length = 0
return HSE_state.HSES_YIELD_TAG_BIT
else:
hse.match_pos = match_pos
hse.match_length = match_length
return HSE_state.HSES_YIELD_TAG_BIT
def find_longest_match(hse, start, end, maxlen):
buf = hse.buffer
match_maxlen = 0
match_index = MATCH_NOT_FOUND
needlepoint = end
pos = hse.search_index.index[end]
buf_needlepoint_maxlen = buf[needlepoint + match_maxlen]
while pos >= start:
if buf[pos + match_maxlen] != buf_needlepoint_maxlen:
pos = hse.search_index.index[pos]
continue
length = 1
for length in range(1, maxlen):
if buf[pos + length] != buf[needlepoint + length]:
break
if length > match_maxlen:
match_maxlen = length
match_index = pos
buf_needlepoint_maxlen = buf[needlepoint + match_maxlen]
if length == maxlen:
break # won't find better
pos = hse.search_index.index[pos]
break_even_point = 1 + hse.window_size + hse.lookahead_size
if match_maxlen > (break_even_point // 8):
return end - match_index, match_maxlen
return MATCH_NOT_FOUND, 0
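find_longest_match() only accepts a match if it beats the cost of emitting the same bytes as literals: a back-reference costs 1 tag bit plus window_size index bits plus lookahead_size length bits, whereas each literal costs 9 bits. A quick check with assumed parameters (window_size = 8, lookahead_size = 4; the actual values are chosen by the caller):

break_even_point = 1 + 8 + 4   # 13 bits for tag + index + length
print(break_even_point // 8)   # 1 -> match_maxlen must be at least 2 bytes to be emitted
# two literals would cost 18 bits, so a 2-byte back-reference (13 bits) already saves space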
def push_outgoing_bits(hse, oi):
if hse.outgoing_bits_count > 8:
count = 8
bits = hse.outgoing_bits >> (hse.outgoing_bits_count - 8)
else:
count = hse.outgoing_bits_count
bits = hse.outgoing_bits
if count > 0:
push_bits(hse, count, bits, oi)
hse.outgoing_bits_count -= count
return count
def st_yield_tag_bit(hse, oi):
if can_take_byte(oi):
if hse.match_length == 0:
add_tag_bit(hse, oi, HEATSHRINK_LITERAL_MARKER)
return HSE_state.HSES_YIELD_LITERAL
else:
add_tag_bit(hse, oi, HEATSHRINK_BACKREF_MARKER)
hse.outgoing_bits = hse.match_pos - 1
hse.outgoing_bits_count = hse.window_size
return HSE_state.HSES_YIELD_BR_INDEX
else:
return HSE_state.HSES_YIELD_TAG_BIT
def st_yield_literal(hse, oi):
if can_take_byte(oi):
push_literal_byte(hse, oi)
return HSE_state.HSES_SEARCH
else:
return HSE_state.HSES_YIELD_LITERAL
def st_yield_br_index(hse, oi):
if can_take_byte(oi):
if push_outgoing_bits(hse, oi) > 0:
return HSE_state.HSES_YIELD_BR_INDEX
else:
hse.outgoing_bits = hse.match_length - 1
hse.outgoing_bits_count = hse.lookahead_size
return HSE_state.HSES_YIELD_BR_LENGTH
else:
return HSE_state.HSES_YIELD_BR_INDEX
def st_yield_br_length(hse, oi):
if can_take_byte(oi):
if push_outgoing_bits(hse, oi) > 0:
return HSE_state.HSES_YIELD_BR_LENGTH
else:
hse.match_scan_index += hse.match_length
hse.match_length = 0
return HSE_state.HSES_SEARCH
else:
return HSE_state.HSES_YIELD_BR_LENGTH
def st_save_backlog(hse):
save_backlog(hse)
return HSE_state.HSES_NOT_FULL
def st_flush_bit_buffer(hse, oi):
if hse.bit_index == 0x80:
return HSE_state.HSES_DONE
elif can_take_byte(oi):
oi.buf[oi.output_size[0]] = hse.current_byte
oi.output_size[0] += 1
return HSE_state.HSES_DONE
else:
return HSE_state.HSES_FLUSH_BITS
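Taken together, the yield states above define the output layout: a 1 tag bit followed by an 8-bit literal, or a 0 tag bit followed by window_size bits of (match_pos - 1) and lookahead_size bits of (match_length - 1), all packed MSB-first, with the final partial byte zero-padded by st_flush_bit_buffer. The decoder sketch below is an editorial illustration of that layout only; the firmware uses the heatshrink C decoder, and decode_stream is a made-up name.

def decode_stream(compressed, window_size, lookahead_size):
    # unpack to a flat, MSB-first bit list
    bits = []
    for byte in compressed:
        bits.extend((byte >> (7 - k)) & 1 for k in range(8))
    out = bytearray()
    pos = 0

    def take(n):
        nonlocal pos
        value = 0
        for _ in range(n):
            value = (value << 1) | bits[pos]
            pos += 1
        return value

    # stop when only zero padding from the flush can remain
    # (assumes back-references are at least 9 bits, e.g. window 8 / lookahead 4)
    while pos + 9 <= len(bits):
        if take(1):                      # HEATSHRINK_LITERAL_MARKER
            out.append(take(8))
        else:                            # HEATSHRINK_BACKREF_MARKER
            if pos + window_size + lookahead_size > len(bits):
                break
            distance = take(window_size) + 1
            length = take(lookahead_size) + 1
            for _ in range(length):      # byte-wise copy handles overlapping matches
                out.append(out[-distance])
    return bytes(out)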
def save_backlog(hse):
input_buf_sz = get_input_buffer_size(hse)
msi = hse.match_scan_index
rem = input_buf_sz - msi # unprocessed bytes
shift_sz = input_buf_sz + rem
for i in range(shift_sz):
hse.buffer[i] = hse.buffer[input_buf_sz - rem + i]
hse.match_scan_index = 0
hse.input_size -= input_buf_sz - rem
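save_backlog() slides the just-processed bytes into the first (history) half of the buffer so the next round can still back-reference them. A numeric walk-through with assumed values (window_size = 8, buffer full, so input_size == 256) makes the index arithmetic concrete:

input_buf_sz = 1 << 8            # 256: the input half; the whole buffer is 2 << 8 = 512 bytes
msi = 250                        # match_scan_index: bytes encoded in this round
rem = input_buf_sz - msi         # 6 leftover, not-yet-encoded bytes
shift_sz = input_buf_sz + rem    # 262: one full window of history plus the leftovers
# buffer[0:262] = buffer[250:512] -> offsets 0..255 hold the most recent history,
# offsets 256..261 hold the 6 leftover bytes, and input_size becomes 256 - 250 = 6.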
def encode(data, window_size, lookahead_size):
hse = heatshrink_encoder(window_size, lookahead_size)
input_buf = (ctypes.c_uint8 * len(data))()
for i, d in enumerate(data):
input_buf[i] = d
in_size = len(input_buf)
out_buf_size = 4 * in_size # generous output buffer; worst-case compressed size can slightly exceed the input
out_buf = (ctypes.c_uint8 * out_buf_size)()
sunk = 0
ret = []
while sunk < in_size:
input_size = ctypes.c_size_t(in_size)
heatshrink_encoder_sink(hse, input_buf, in_size - sunk, input_size)
input_buf = input_buf[input_size.value:]
sunk += input_size.value
if sunk == in_size:
heatshrink_encoder_finish(hse)
poll_res = HSE_poll_res.HSER_POLL_MORE
while poll_res == HSE_poll_res.HSER_POLL_MORE:
output_size = (ctypes.c_size_t * 1)()
poll_res = heatshrink_encoder_poll(hse, out_buf, out_buf_size, output_size)
ret += list(out_buf)[0:output_size[0]]
if sunk == in_size:
heatshrink_encoder_finish(hse)
return ret
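A hypothetical invocation of encode(), as a build-time caller might use it; the format string, the window/lookahead values (8/4) and the variable names here are illustrative, not taken from the generator.

if __name__ == "__main__":
    # e.g. all topic definitions concatenated into one string by the generator
    all_formats = "vehicle_status:uint64_t timestamp;uint8_t nav_state;\0"
    compressed = encode(all_formats.encode(), 8, 4)
    print(f"{len(all_formats)} bytes -> {len(compressed)} bytes")
    # the resulting byte list would then be emitted as a C array for the firmware
    # to decompress at runtime with the heatshrink C decoder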

View File

@ -35,6 +35,7 @@ px4_add_module(
MODULE modules__logger
MAIN logger
PRIORITY "SCHED_PRIORITY_MAX-30"
STACK_MAIN 2500
COMPILE_FLAGS
${MAX_CUSTOM_OPT_LEVEL}
-Wno-cast-align # TODO: fix and enable

View File

@ -44,12 +44,14 @@
#include <stdlib.h>
#include <time.h>
#include <uORB/uORBMessageFields.hpp>
#include <uORB/Publication.hpp>
#include <uORB/topics/uORBTopics.hpp>
#include <uORB/topics/parameter_update.h>
#include <uORB/topics/vehicle_command_ack.h>
#include <uORB/topics/battery_status.h>
#include <containers/Bitset.hpp>
#include <drivers/drv_hrt.h>
#include <mathlib/math/Limits.hpp>
#include <px4_platform/cpuload.h>
@ -1659,156 +1661,134 @@ void Logger::write_console_output()
}
void Logger::write_format(LogType type, const orb_metadata &meta, WrittenFormats &written_formats,
ulog_message_format_s &msg, int subscription_index, int level)
{
if (level > 3) {
// precaution: limit recursion level. If we land here it's either a bug or nested topic definitions. In the
// latter case, increase the maximum level.
PX4_ERR("max recursion level reached (%i)", level);
return;
}
// check if we already wrote the format: either if at a previous _subscriptions index or in written_formats
for (const auto &written_format : written_formats) {
if (written_format == &meta) {
PX4_DEBUG("already added: %s", meta.o_name);
return;
}
}
for (int i = 0; i < subscription_index; ++i) {
if (_subscriptions[i].get_topic() == &meta) {
PX4_DEBUG("already in _subscriptions: %s", meta.o_name);
return;
}
}
PX4_DEBUG("writing format for %s", meta.o_name);
// Write the current format (we don't need to check if we already added it to written_formats)
int format_len = snprintf(msg.format, sizeof(msg.format), "%s:", meta.o_name);
for (int format_idx = 0; meta.o_fields[format_idx] != 0;) {
const char *end_field = strchr(meta.o_fields + format_idx, ';');
if (!end_field) {
PX4_ERR("Format error in %s", meta.o_fields);
return;
}
const char *c_type = orb_get_c_type(meta.o_fields[format_idx]);
if (c_type) {
format_len += snprintf(msg.format + format_len, sizeof(msg.format) - format_len, "%s", c_type);
++format_idx;
}
int len = end_field - (meta.o_fields + format_idx) + 1;
if (len >= (int)sizeof(msg.format) - format_len) {
PX4_WARN("skip topic %s, format string is too large, max is %zu", meta.o_name,
sizeof(ulog_message_format_s::format));
return;
}
memcpy(msg.format + format_len, meta.o_fields + format_idx, len);
format_len += len;
format_idx += len;
}
msg.format[format_len] = '\0';
size_t msg_size = sizeof(msg) - sizeof(msg.format) + format_len;
msg.msg_size = msg_size - ULOG_MSG_HEADER_LEN;
write_message(type, &msg, msg_size);
if (level > 1 && !written_formats.push_back(&meta)) {
PX4_ERR("Array too small");
}
// Now go through the fields and check for nested type usages.
// o_fields looks like this for example: "<chr> timestamp;<chr>[5] array;"
const char *fmt = meta.o_fields;
while (fmt && *fmt) {
// extract the type name
char type_name[64];
const char *space = strchr(fmt, ' ');
if (!space) {
PX4_ERR("invalid format %s", fmt);
break;
}
const char *array_start = strchr(fmt, '['); // check for an array
int type_length;
if (array_start && array_start < space) {
type_length = array_start - fmt;
} else {
type_length = space - fmt;
}
if (type_length >= (int)sizeof(type_name)) {
PX4_ERR("buf len too small");
break;
}
memcpy(type_name, fmt, type_length);
type_name[type_length] = '\0';
// ignore built-in types
if (orb_get_c_type(type_name[0]) == nullptr) {
// find orb meta for type
const orb_metadata *const *topics = orb_get_topics();
const orb_metadata *found_topic = nullptr;
for (size_t i = 0; i < orb_topics_count(); i++) {
if (strcmp(topics[i]->o_name, type_name) == 0) {
found_topic = topics[i];
}
}
if (found_topic) {
write_format(type, *found_topic, written_formats, msg, subscription_index, level + 1);
} else {
PX4_ERR("No definition for topic %s found", fmt);
}
}
fmt = strchr(fmt, ';');
if (fmt) { ++fmt; }
}
}
void Logger::write_formats(LogType type)
{
_writer.lock();
// both of these are large and thus we need to be careful in terms of stack size requirements
// This is large and thus we need to be careful in terms of stack size requirements
ulog_message_format_s msg;
WrittenFormats written_formats;
// write all subscribed formats
// Write all subscribed formats
int sub_count = _num_subscriptions;
if (type == LogType::Mission) {
sub_count = _num_mission_subs;
}
// Keep a bitset of all required formats (nested definitions are added later on to the bitset)
px4::Bitset<ORB_TOPICS_COUNT> formats_to_write;
for (int i = 0; i < sub_count; ++i) {
const LoggerSubscription &sub = _subscriptions[i];
write_format(type, *sub.get_topic(), written_formats, msg, i);
if (sub.get_topic()->o_id < formats_to_write.size()) {
formats_to_write.set(sub.get_topic()->o_id);
} else {
PX4_ERR("logic error");
}
}
write_format(type, *_event_subscription.get_topic(), written_formats, msg, sub_count);
formats_to_write.set(_event_subscription.get_topic()->o_id);
static_assert(sizeof(msg.format) > uORB::orb_tokenized_fields_max_length, "uORB message definition too long");
uORB::MessageFormatReader format_reader(msg.format, sizeof(msg.format));
bool done = false;
while (!done) {
switch (format_reader.readMore()) {
case uORB::MessageFormatReader::State::FormatComplete: {
unsigned format_length = format_reader.formatLength();
// Move the left-over (the part after the format if any) to the end of the buffer
const unsigned leftover_length = format_reader.moveLeftoverToBufferEnd();
bool needs_expansion = true;
int last_name_length = 0;
bool format_error = false;
for (const orb_id_size_t orb_id : format_reader.orbIDs()) {
if (orb_id >= formats_to_write.size() || !formats_to_write[orb_id]) {
continue;
}
// Make sure to write dependencies too
for (const orb_id_size_t orb_id_dep : format_reader.orbIDsDependencies()) {
formats_to_write.set(orb_id_dep);
}
formats_to_write.set(orb_id, false);
const orb_metadata &meta = *get_orb_meta((ORB_ID) orb_id);
PX4_DEBUG("writing format for %s", meta.o_name);
// Expand if needed (first time only)
if (needs_expansion) {
const int ret = uORB::MessageFormatReader::expandMessageFormat(msg.format, format_length,
sizeof(msg.format) - leftover_length);
if (ret < 0) {
PX4_ERR("Format %s error (too long?)", meta.o_name);
format_error = true;
} else {
format_length = ret;
}
needs_expansion = false;
}
// Prepend format name and ':'
const int name_length = strlen(meta.o_name) + 1; // + 1 for ':'
if (format_length + name_length - last_name_length + 1 > sizeof(msg.format) - leftover_length) {
PX4_ERR("Format %s too long", meta.o_name);
format_error = true;
}
if (format_error) {
break;
}
if (last_name_length != name_length) {
memmove(msg.format + name_length, msg.format + last_name_length,
format_length + 1 - last_name_length);
msg.format[name_length - 1] = ':';
format_length += name_length - last_name_length;
last_name_length = name_length;
}
memcpy(msg.format, meta.o_name, name_length - 1);
size_t msg_size = sizeof(msg) - sizeof(msg.format) + format_length;
msg.msg_size = msg_size - ULOG_MSG_HEADER_LEN;
write_message(type, &msg, msg_size);
}
// Move left-over back
format_reader.clearFormatAndRestoreLeftover();
break;
}
break;
case uORB::MessageFormatReader::State::Failure:
PX4_ERR("Failed to read formats");
done = true;
break;
case uORB::MessageFormatReader::State::Complete:
done = true;
break;
default:
break;
}
}
if (formats_to_write.count() > 0) {
// Getting here is a bug. Maybe the ordering of nested formats is not as expected?
PX4_ERR("Not all formats written");
}
_writer.unlock();
}
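For readers following the loop above: one decompressed-and-expanded field string can be shared by several topics (format_reader.orbIDs() returns all of them), so the code keeps the fields in place and only swaps the "name:" prefix via memmove/memcpy. A conceptual Python model of that step, using hypothetical topic names and an already-expanded field string (illustration only, not the firmware code):

def emit_shared_format(expanded_fields, topic_names):
    # one ulog FORMAT message per topic, all reusing the same field string
    return [f"{name}:{expanded_fields}" for name in topic_names]

# emit_shared_format("uint64_t timestamp;float[3] xyz;", ["sensor_accel_fifo", "sensor_gyro_fifo"])
# -> ["sensor_accel_fifo:uint64_t timestamp;float[3] xyz;",
#     "sensor_gyro_fifo:uint64_t timestamp;float[3] xyz;"]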

View File

@ -236,11 +236,6 @@ private:
*/
void write_header(LogType type);
/// Array to store written formats for nested definitions (only)
using WrittenFormats = Array < const orb_metadata *, 20 >;
void write_format(LogType type, const orb_metadata &meta, WrittenFormats &written_formats, ulog_message_format_s &msg,
int subscription_index, int level = 1);
void write_formats(LogType type);
/**

View File

@ -116,7 +116,7 @@ struct ulog_message_format_s {
uint16_t msg_size; ///< size of message - ULOG_MSG_HEADER_LEN
uint8_t msg_type = static_cast<uint8_t>(ULogMessageType::FORMAT);
char format[1500];
char format[1600];
};
/**

View File

@ -47,6 +47,7 @@
#include <px4_platform_common/time.h>
#include <px4_platform_common/shutdown.h>
#include <lib/parameters/param.h>
#include <uORB/uORBMessageFields.hpp>
#include <cstring>
#include <float.h>
@ -391,33 +392,30 @@ Replay::readFormat(std::ifstream &file, uint16_t msg_size)
}
string Replay::parseOrbFields(const string &fields)
string Replay::getOrbFields(const orb_metadata *meta)
{
string ret{};
char format[3000];
char buffer[512];
uORB::MessageFormatReader format_reader(buffer, sizeof(buffer));
// convert o_fields from "<chr> timestamp;<chr>[5] array;" to "uint64_t timestamp;int8_t[5] array;"
for (int format_idx = 0; format_idx < (int)fields.length();) {
const char *end_field = strchr(fields.c_str() + format_idx, ';');
if (!end_field) {
PX4_ERR("Format error in %s", fields.c_str());
return "";
}
const char *c_type = orb_get_c_type(fields[format_idx]);
if (c_type) {
string str_type = c_type;
ret += str_type;
++format_idx;
}
int len = end_field - (fields.c_str() + format_idx) + 1;
ret += fields.substr(format_idx, len);
format_idx += len;
if (!format_reader.readUntilFormat(meta->o_id)) {
PX4_ERR("failed to find format for topic %s", meta->o_name);
return "";
}
return ret;
int field_length = 0;
int format_length = 0;
while (format_reader.readNextField(field_length)) {
format_length += snprintf(format + format_length, sizeof(format) - format_length - 1, "%s;", buffer);
}
if (uORB::MessageFormatReader::expandMessageFormat(format, format_length, sizeof(format)) < 0) {
PX4_ERR("failed to expand message format for %s", meta->o_name);
return "";
}
return format;
}
bool
@ -455,7 +453,7 @@ Replay::readAndAddSubscription(std::ifstream &file, uint16_t msg_size)
// FIXME: this should recursively check all used nested types
string file_format = _file_formats[topic_name];
const string orb_fields = parseOrbFields(orb_meta->o_fields);
const string orb_fields = getOrbFields(orb_meta);
if (file_format != orb_fields) {
// check if we have a compatibility conversion available

View File

@ -297,7 +297,7 @@ private:
void setUserParams(const char *filename);
void readDynamicParams(const char *filename);
std::string parseOrbFields(const std::string &fields);
std::string getOrbFields(const orb_metadata *meta);
static char *_replay_file;
};