Compare commits

...

15 Commits

Author SHA1 Message Date
GuillaumeLaine d120b8b6c0
ev_odom: always convert reference frame enum 2024-01-19 08:25:26 +01:00
Beat Küng b32d1c295b
uxrce_dds_client: immediately create data writers on startup
There is a race condition where, in rare cases, a topic published right after
creating the writer was not received on the ROS side.
This happened even with reliable QoS and a reliable transport.
2024-01-19 08:25:26 +01:00
Beat Küng 39d7c5b60c
uxrce_dds_client: add reliable_qos option for topics
It's not set for any topic, as it only works over UDP but not over serial.
Over serial, all topics are created, but then no data is sent:
uxrce_dds_client status
INFO  [uxrce_dds_client] Running, connected
INFO  [uxrce_dds_client] Using transport:     serial
INFO  [uxrce_dds_client] Payload tx:          0 B/s
INFO  [uxrce_dds_client] Payload rx:          0 B/s
2024-01-19 08:25:26 +01:00
GuillaumeLaine a73a308486
ci: add external navigation integration tests 2024-01-19 08:25:25 +01:00
Beat Küng 79834fd795
ros_tests: change filter to use all tests
The name is being changed in https://github.com/Auterion/px4-ros2-interface-lib/pull/8
2024-01-19 08:25:25 +01:00
Beat Küng d0083d717b
fix rtl: ensure mode_completed is called when finished
set_rtl_item() is not called if 'rtl_state == RTLState::IDLE'
2024-01-19 08:25:25 +01:00
Beat Küng d632a42be5
generate_component_general: ensure string is a regex literal
Fixes the Python warning:
SyntaxWarning: invalid escape sequence '\s'

It was still working correctly before.
2024-01-19 08:25:25 +01:00
Beat Küng 37f24a17eb
integration tests: add --force-color & set in CI
GitHub Actions supports color output, but does not report as a TTY.
See https://github.com/actions/runner/issues/241.
2024-01-19 08:25:25 +01:00
Beat Küng e26767e260
ci: add ros integration tests 2024-01-19 08:25:25 +01:00
Beat Küng 9fd264fe33
test: add ros integration test runner script & config 2024-01-19 08:25:25 +01:00
Beat Küng 181777b8bc
refactor mavsdk_tests: move code into separate classes & extract mavsdk-specifics
Allows it to be reused for other integration tests, like ROS.
2024-01-19 08:25:25 +01:00
Beat Küng eb3acb2de1
fix battery_simulator: consider voltage load drop when forcing empty battery
Otherwise the battery wasn't considered empty and didn't trigger critical battery
actions.
2024-01-19 08:25:24 +01:00
Beat Küng aa36686ea4
mavsdk_tests: highlight px4 errors & reset color on gazebo output 2024-01-19 08:25:24 +01:00
Beat Küng 216eb002eb
fix mavsdk_tests: add ',' to test_filter config 2024-01-19 08:25:24 +01:00
Beat Küng 36241bbdb2
sitl: add generic way to override params via ENV variables 2024-01-19 08:25:22 +01:00
21 changed files with 1141 additions and 632 deletions

View File

@ -0,0 +1,96 @@
name: ROS Integration Tests
on:
push:
branches:
- 'main'
pull_request:
branches:
- '*'
jobs:
build:
runs-on: ubuntu-latest
container:
image: px4io/px4-dev-ros2-galactic:2021-09-08
options: --privileged --ulimit core=-1 --security-opt seccomp=unconfined
steps:
- uses: actions/checkout@v1
with:
token: ${{ secrets.ACCESS_TOKEN }}
- name: Install gazebo
run: |
apt update && apt install -y gazebo11 libgazebo11-dev gstreamer1.0-plugins-bad gstreamer1.0-plugins-base gstreamer1.0-plugins-good gstreamer1.0-plugins-ugly libgstreamer-plugins-base1.0-dev
- name: Prepare ccache timestamp
id: ccache_cache_timestamp
shell: cmake -P {0}
run: |
string(TIMESTAMP current_date "%Y-%m-%d-%H;%M;%S" UTC)
message("::set-output name=timestamp::${current_date}")
- name: ccache cache files
uses: actions/cache@v2
with:
path: ~/.ccache
key: ros_integration_tests-${{matrix.config.build_type}}-ccache-${{steps.ccache_cache_timestamp.outputs.timestamp}}
restore-keys: ros_integration_tests-${{matrix.config.build_type}}-ccache-
- name: setup ccache
run: |
mkdir -p ~/.ccache
echo "base_dir = ${GITHUB_WORKSPACE}" > ~/.ccache/ccache.conf
echo "compression = true" >> ~/.ccache/ccache.conf
echo "compression_level = 6" >> ~/.ccache/ccache.conf
echo "max_size = 300M" >> ~/.ccache/ccache.conf
echo "hash_dir = false" >> ~/.ccache/ccache.conf
ccache -s
ccache -z
- name: Get and build micro-xrce-dds-agent
run: |
cd /opt
git clone --recursive https://github.com/eProsima/Micro-XRCE-DDS-Agent.git
cd Micro-XRCE-DDS-Agent
git checkout v2.2.1 # recent versions require cmake 3.22, but px4-dev-ros2-galactic:2021-09-08 is on 3.16
mkdir build
cd build
cmake ..
make -j2
- name: ccache post-run micro-xrce-dds-agent
run: ccache -s
- name: Get and build the ros2 interface library
shell: bash
run: |
. /opt/ros/galactic/setup.bash
mkdir -p /opt/px4_ws/src
cd /opt/px4_ws/src
git clone --recursive https://github.com/Auterion/px4-ros2-interface-lib.git
cd ..
vcs import --input src/px4-ros2-interface-lib/dependencies.repos src
colcon build --symlink-install
- name: ccache post-run ros workspace
run: ccache -s
- name: Build PX4
run: make px4_sitl_default
- name: ccache post-run px4/firmware
run: ccache -s
- name: Build SITL Gazebo
run: make px4_sitl_default sitl_gazebo-classic
- name: ccache post-run sitl_gazebo-classic
run: ccache -s
- name: Core dump settings
run: |
ulimit -c unlimited
echo "`pwd`/%e.core" > /proc/sys/kernel/core_pattern
- name: Run tests
shell: bash
run: |
. /opt/px4_ws/install/setup.bash
/opt/Micro-XRCE-DDS-Agent/build/MicroXRCEAgent udp4 localhost -p 8888 -v 0 &
test/ros_test_runner.py --verbose --model iris --upload --force-color
timeout-minutes: 45

View File

@ -96,7 +96,7 @@ jobs:
PX4_HOME_LON: ${{matrix.config.longitude}}
PX4_HOME_ALT: ${{matrix.config.altitude}}
PX4_CMAKE_BUILD_TYPE: ${{matrix.config.build_type}}
run: test/mavsdk_tests/mavsdk_test_runner.py --speed-factor 20 --abort-early --model ${{matrix.config.model}} --upload test/mavsdk_tests/configs/sitl.json --verbose
run: test/mavsdk_tests/mavsdk_test_runner.py --speed-factor 20 --abort-early --model ${{matrix.config.model}} --upload test/mavsdk_tests/configs/sitl.json --verbose --force-color
timeout-minutes: 45
- name: Look at core files

View File

@ -126,6 +126,15 @@ then
set AUTOCNF yes
fi
# Allow overriding parameters via env variables: export PX4_PARAM_{name}={value}
env | while IFS='=' read -r line; do
value=${line#*=}
name=${line%%=*}
case $name in
"PX4_PARAM_"*) param set "${name#PX4_PARAM_}" "$value" ;;
esac
done
# multi-instance setup
# shellcheck disable=SC2154
param set MAV_SYS_ID $((px4_instance+1))
@ -186,11 +195,6 @@ param set-default SYS_FAILURE_EN 1
# does not go below 50% by default, but failure injection can trigger failsafes.
param set-default COM_LOW_BAT_ACT 2
# Allow to override SYS_MC_EST_GROUP via env
if [ -n "$SYS_MC_EST_GROUP" ]; then
param set SYS_MC_EST_GROUP $SYS_MC_EST_GROUP
fi
# Adapt timeout parameters if simulation runs faster or slower than realtime.
if [ -n "$PX4_SIM_SPEED_FACTOR" ]; then
COM_DL_LOSS_T_LONGER=$(echo "$PX4_SIM_SPEED_FACTOR * 10" | bc)
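
The PX4_PARAM_ loop added to rcS above turns any PX4_PARAM_<name> environment variable into a "param set <name> <value>" call at boot, replacing the dedicated SYS_MC_EST_GROUP override. A minimal sketch of driving it from Python, assuming a locally built px4_sitl_default tree; the path and parameter value are illustrative, and the Px4Runner in this PR passes additional flags that are omitted here:

import os
import subprocess

build_dir = "build/px4_sitl_default"  # illustrative; adjust to your workspace

env = os.environ.copy()
# Picked up by the rcS loop shown above and applied as: param set SYS_MC_EST_GROUP 3
env["PX4_PARAM_SYS_MC_EST_GROUP"] = "3"

# Launch PX4 SITL with the rootfs "etc" path (argument list abbreviated).
subprocess.run(
    [os.path.join(build_dir, "bin/px4"), os.path.join(build_dir, "etc")],
    env=env,
    check=True,
)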

View File

@ -76,6 +76,11 @@ public:
*/
int cell_count() { return _params.n_cells; }
/**
* Get the voltage drop per cell on full throttle
*/
float load_drop_v() { return _params.v_load_drop; }
/**
* Get the empty voltage per cell
*/

View File

@ -20,7 +20,7 @@ version_file = args.version_file
version_dir = ''
if version_file is not None:
for line in open(version_file, "r"):
version_search = re.search('PX4_GIT_TAG_OR_BRANCH_NAME\s+"(.+)"', line)
version_search = re.search(r'PX4_GIT_TAG_OR_BRANCH_NAME\s+"(.+)"', line)
if version_search:
version_dir = version_search.group(1)
break
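
For context on the warning this change silences: recent Python versions emit a SyntaxWarning for unrecognized escape sequences such as \s inside ordinary string literals, while a raw string expresses the exact same regex without it. A small standalone illustration; the input line is made up and only the pattern matters:

import re

line = 'PX4_GIT_TAG_OR_BRANCH_NAME "v1.14.0-dev"'  # made-up example input

# Raw-string pattern: identical regex to before the change, but no
# "SyntaxWarning: invalid escape sequence '\s'" at compile time.
match = re.search(r'PX4_GIT_TAG_OR_BRANCH_NAME\s+"(.+)"', line)
if match:
    print(match.group(1))  # prints: v1.14.0-dev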

View File

@ -2161,7 +2161,6 @@ bool EKF2::UpdateExtVisionSample(ekf2_timestamps_s &ekf2_timestamps)
const Vector3f ev_odom_vel(ev_odom.velocity);
const Vector3f ev_odom_vel_var(ev_odom.velocity_variance);
if (ev_odom_vel.isAllFinite()) {
bool velocity_frame_valid = false;
switch (ev_odom.velocity_frame) {
@ -2181,6 +2180,7 @@ bool EKF2::UpdateExtVisionSample(ekf2_timestamps_s &ekf2_timestamps)
break;
}
if (ev_odom_vel.isAllFinite()) {
if (velocity_frame_valid) {
ev_data.vel = ev_odom_vel;
@ -2205,7 +2205,6 @@ bool EKF2::UpdateExtVisionSample(ekf2_timestamps_s &ekf2_timestamps)
const Vector3f ev_odom_pos(ev_odom.position);
const Vector3f ev_odom_pos_var(ev_odom.position_variance);
if (ev_odom_pos.isAllFinite()) {
bool position_frame_valid = false;
switch (ev_odom.pose_frame) {
@ -2220,6 +2219,7 @@ bool EKF2::UpdateExtVisionSample(ekf2_timestamps_s &ekf2_timestamps)
break;
}
if (ev_odom_pos.isAllFinite()) {
if (position_frame_valid) {
ev_data.pos = ev_odom_pos;

View File

@ -324,19 +324,21 @@ void RtlDirect::set_rtl_item()
startPrecLand(_mission_item.land_precision);
_rtl_state = RTLState::IDLE;
_rtl_state = RTLState::COMPLETED;
mavlink_log_info(_navigator->get_mavlink_log_pub(), "RTL: land at destination\t");
events::send(events::ID("rtl_land_at_destination"), events::Log::Info, "RTL: land at destination");
break;
}
case RTLState::IDLE: {
case RTLState::COMPLETED: {
set_idle_item(&_mission_item);
_navigator->mode_completed(vehicle_status_s::NAVIGATION_STATE_AUTO_RTL);
_rtl_state = RTLState::IDLE;
break;
}
case RTLState::IDLE:
default:
break;
}
@ -489,6 +491,7 @@ rtl_time_estimate_s RtlDirect::calc_rtl_time_estimate()
break;
case RTLState::COMPLETED:
case RTLState::IDLE:
// Remaining time is 0
break;

View File

@ -115,6 +115,7 @@ private:
TRANSITION_TO_MC,
MOVE_TO_LAND_HOVER,
LAND,
COMPLETED,
IDLE
} _rtl_state{RTLState::IDLE}; /*< Current state in the state machine.*/

View File

@ -103,7 +103,7 @@ void BatterySimulator::Run()
_battery.full_cell_voltage());
if (_force_empty_battery) {
vbatt = _battery.empty_cell_voltage();
vbatt = _battery.empty_cell_voltage() - _battery.load_drop_v();
}
vbatt *= _battery.cell_count();
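
A rough back-of-the-envelope sketch of why the per-cell load drop is subtracted, with purely illustrative numbers (assumption: the battery estimator compensates the measured voltage for load, so a pack forced to exactly the empty voltage can still be estimated as slightly above empty):

# Illustrative values only; not the actual parameter defaults.
empty_cell_v = 3.50   # empty voltage per cell
load_drop_v = 0.30    # voltage drop per cell at full throttle
n_cells = 4

# Before the fix: the forced pack voltage sits exactly at the empty threshold,
# which after load compensation may not count as empty.
vbatt_before = empty_cell_v * n_cells                  # 14.0 V
# After the fix: subtracting the load drop keeps the estimate at or below
# empty, so the critical-battery failsafe actions are actually exercised.
vbatt_after = (empty_cell_v - load_drop_v) * n_cells   # 12.8 V
print(vbatt_before, vbatt_after)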

View File

@ -45,6 +45,7 @@ struct SendSubscription {
const char* dds_type_name;
uint32_t topic_size;
UcdrSerializeMethod ucdr_serialize_method;
bool reliable_qos;
};
// Subscribers for messages to send
@ -56,6 +57,7 @@ struct SendTopicsSubs {
"@(pub['dds_type'])",
ucdr_topic_size_@(pub['simple_base_type'])(),
&ucdr_serialize_@(pub['simple_base_type']),
@(pub['reliable_qos']),
},
@[ end for]@
};
@ -64,18 +66,25 @@ struct SendTopicsSubs {
uint32_t num_payload_sent{};
void init();
bool init(uxrSession *session, uxrStreamId reliable_out_stream_id, uxrStreamId reliable_in_stream_id, uxrStreamId best_effort_in_stream_id, uxrObjectId participant_id, const char *client_namespace);
void update(uxrSession *session, uxrStreamId reliable_out_stream_id, uxrStreamId best_effort_stream_id, uxrObjectId participant_id, const char *client_namespace);
void reset();
};
void SendTopicsSubs::init() {
bool SendTopicsSubs::init(uxrSession *session, uxrStreamId reliable_out_stream_id, uxrStreamId reliable_in_stream_id, uxrStreamId best_effort_in_stream_id, uxrObjectId participant_id, const char *client_namespace) {
bool ret = true;
for (unsigned idx = 0; idx < sizeof(send_subscriptions)/sizeof(send_subscriptions[0]); ++idx) {
fds[idx].fd = orb_subscribe(send_subscriptions[idx].orb_meta);
fds[idx].events = POLLIN;
orb_set_interval(fds[idx].fd, UXRCE_DEFAULT_POLL_RATE);
if (!create_data_writer(session, reliable_out_stream_id, participant_id, static_cast<ORB_ID>(send_subscriptions[idx].orb_meta->o_id), client_namespace, send_subscriptions[idx].orb_meta->o_name,
send_subscriptions[idx].dds_type_name, send_subscriptions[idx].data_writer, send_subscriptions[idx].reliable_qos)) {
ret = false;
}
}
return ret;
}
void SendTopicsSubs::reset() {
num_payload_sent = 0;
@ -94,17 +103,13 @@ void SendTopicsSubs::update(uxrSession *session, uxrStreamId reliable_out_stream
if (fds[idx].revents & POLLIN) {
// Topic updated, copy data and send
orb_copy(send_subscriptions[idx].orb_meta, fds[idx].fd, &topic_data);
if (send_subscriptions[idx].data_writer.id == UXR_INVALID_ID) {
// data writer not created yet
create_data_writer(session, reliable_out_stream_id, participant_id, static_cast<ORB_ID>(send_subscriptions[idx].orb_meta->o_id), client_namespace, send_subscriptions[idx].orb_meta->o_name,
send_subscriptions[idx].dds_type_name, send_subscriptions[idx].data_writer);
}
if (send_subscriptions[idx].data_writer.id != UXR_INVALID_ID) {
ucdrBuffer ub;
uint32_t topic_size = send_subscriptions[idx].topic_size;
if (uxr_prepare_output_stream(session, best_effort_stream_id, send_subscriptions[idx].data_writer, &ub, topic_size) != UXR_INVALID_REQUEST_ID) {
const uxrStreamId stream_id = send_subscriptions[idx].reliable_qos ? reliable_out_stream_id : best_effort_stream_id;
if (uxr_prepare_output_stream(session, stream_id, send_subscriptions[idx].data_writer, &ub, topic_size) != UXR_INVALID_REQUEST_ID) {
send_subscriptions[idx].ucdr_serialize_method(&topic_data, ub, time_offset_us);
// TODO: fill up the MTU and then flush, which reduces the packet overhead
uxr_flash_output_streams(session);
@ -169,7 +174,8 @@ bool RcvTopicsPubs::init(uxrSession *session, uxrStreamId reliable_out_stream_id
@[ for idx, sub in enumerate(subscriptions + subscriptions_multi)]@
{
uint16_t queue_depth = uORB::DefaultQueueSize<@(sub['simple_base_type'])_s>::value * 2; // use a bit larger queue size than internal
create_data_reader(session, reliable_out_stream_id, best_effort_in_stream_id, participant_id, @(idx), client_namespace, "@(sub['topic_simple'])", "@(sub['dds_type'])", queue_depth);
const uxrStreamId stream_in_id = @(sub['reliable_qos']) ? reliable_in_stream_id : best_effort_in_stream_id;
create_data_reader(session, reliable_out_stream_id, stream_in_id, participant_id, @(idx), client_namespace, "@(sub['topic_simple'])", "@(sub['dds_type'])", queue_depth, @(sub['reliable_qos']));
}
@[ end for]@

View File

@ -99,6 +99,8 @@ def process_message_type(msg_type):
# topic_simple: eg vehicle_status
msg_type['topic_simple'] = msg_type['topic'].split('/')[-1]
msg_type['reliable_qos'] = 'true' if msg_type.get('reliable_qos', False) else 'false'
pubs_not_empty = msg_map['publications'] is not None
if pubs_not_empty:
for p in msg_map['publications']:
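
For illustration, the line added above normalizes an optional per-topic reliable_qos flag from dds_topics.yaml into the C++ boolean literal that the code generator embeds; per the commit message no topic enables it yet, so the opted-in entry below is hypothetical:

def normalize_reliable_qos(msg_type: dict) -> dict:
    # Mirrors the generator change above: a missing or falsy flag becomes the
    # literal 'false', a truthy one becomes 'true'.
    msg_type['reliable_qos'] = 'true' if msg_type.get('reliable_qos', False) else 'false'
    return msg_type

# Hypothetical topic entries; only the 'reliable_qos' key matters here.
print(normalize_reliable_qos({'topic': '/fmu/out/vehicle_status'}))
print(normalize_reliable_qos({'topic': '/fmu/out/vehicle_status', 'reliable_qos': True}))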

View File

@ -36,7 +36,7 @@ static bool generate_topic_name(char *topic, const char *client_namespace, const
static bool create_data_writer(uxrSession *session, uxrStreamId reliable_out_stream_id, uxrObjectId participant_id,
ORB_ID orb_id, const char *client_namespace, const char *topic_name_simple, const char *type_name,
uxrObjectId &datawriter_id)
uxrObjectId &datawriter_id, bool reliable_qos)
{
// topic
char topic_name[TOPIC_NAME_SIZE];
@ -62,7 +62,7 @@ static bool create_data_writer(uxrSession *session, uxrStreamId reliable_out_str
uxrQoS_t qos = {
.durability = UXR_DURABILITY_TRANSIENT_LOCAL,
.reliability = UXR_RELIABILITY_BEST_EFFORT,
.reliability = reliable_qos ? UXR_RELIABILITY_RELIABLE : UXR_RELIABILITY_BEST_EFFORT,
.history = UXR_HISTORY_KEEP_LAST,
.depth = 0,
};
@ -88,7 +88,7 @@ static bool create_data_writer(uxrSession *session, uxrStreamId reliable_out_str
static bool create_data_reader(uxrSession *session, uxrStreamId reliable_out_stream_id, uxrStreamId input_stream_id,
uxrObjectId participant_id, uint16_t index, const char *client_namespace, const char *topic_name_simple,
const char *type_name, uint16_t queue_depth)
const char *type_name, uint16_t queue_depth, bool reliable_qos)
{
// topic
char topic_name[TOPIC_NAME_SIZE];
@ -118,7 +118,7 @@ static bool create_data_reader(uxrSession *session, uxrStreamId reliable_out_str
uxrQoS_t qos = {
.durability = UXR_DURABILITY_VOLATILE,
.reliability = UXR_RELIABILITY_BEST_EFFORT,
.reliability = reliable_qos ? UXR_RELIABILITY_RELIABLE : UXR_RELIABILITY_BEST_EFFORT,
.history = UXR_HISTORY_KEEP_LAST,
.depth = queue_depth,
};

View File

@ -418,6 +418,11 @@ void UxrceddsClient::run()
return;
}
if (!_subs->init(&session, reliable_out, reliable_in, best_effort_in, participant_id, _client_namespace)) {
PX4_ERR("subs init failed");
return;
}
// create VehicleCommand replier
if (num_of_repliers < MAX_NUM_REPLIERS) {
if (add_replier(new VehicleCommandSrv(&session, reliable_out, reliable_in, participant_id, _client_namespace,
@ -463,8 +468,6 @@ void UxrceddsClient::run()
uint32_t last_num_payload_received{};
int poll_error_counter = 0;
_subs->init();
while (!should_exit() && _connected) {
/* Wait for topic updates for max 1000 ms (1sec) */

View File

@ -7,7 +7,7 @@
{
"model": "iris",
"vehicle": "iris",
"test_filter": "[multicopter],[offboard][offboard_attitude]",
"test_filter": "[multicopter],[offboard],[offboard_attitude]",
"timeout_min": 10
},
{
@ -16,7 +16,7 @@
"test_filter": "[offboard_attitude]",
"timeout_min": 10,
"env": {
"SYS_MC_EST_GROUP": 3
"PX4_PARAM_SYS_MC_EST_GROUP": 3
}
},
{

View File

@ -1,11 +1,10 @@
#!/usr/bin/env python3
import re
import sys
import os
from enum import Enum
from functools import lru_cache
force_color = False
class color(Enum):
PURPLE = '\033[95m'
@ -22,7 +21,7 @@ class color(Enum):
def colorize(text: str, c: color) -> str:
if _supports_color():
if force_color or _supports_color():
return str(c.value) + text + color.RESET.value
else:
return text
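
A minimal usage sketch of the new force_color switch, assuming the integration_test_runner package introduced in this PR is on the Python path (that is how the runner scripts import it):

from integration_test_runner import logger_helper
from integration_test_runner.logger_helper import color, colorize

# In CI, stdout is not a TTY, so _supports_color() returns False; forcing
# color keeps the ANSI escape codes in the captured job output.
logger_helper.force_color = True

print(colorize("Overall result: PASS", color.GREEN))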

View File

@ -1,5 +1,3 @@
#!/usr/bin/env python3
import queue
import time
import os
@ -9,6 +7,7 @@ import shutil
import threading
import errno
from typing import Any, Dict, List, TextIO, Optional
from .logger_helper import colorize, color
PX4_SITL_GAZEBO_PATH = "Tools/simulation/gazebo-classic/sitl_gazebo-classic"
@ -152,12 +151,13 @@ class Runner:
class Px4Runner(Runner):
def __init__(self, workspace_dir: str, log_dir: str,
model: str, case: str, speed_factor: float,
debugger: str, verbose: bool, build_dir: str):
debugger: str, verbose: bool, build_dir: str,
rootfs_base_dirname: str):
super().__init__(log_dir, model, case, verbose)
self.name = "px4"
self.cmd = os.path.join(workspace_dir, build_dir, "bin/px4")
self.cwd = os.path.join(workspace_dir, build_dir,
"tmp_mavsdk_tests/rootfs")
rootfs_base_dirname, "rootfs")
self.args = [
os.path.join(workspace_dir, build_dir, "etc"),
"-s",
@ -212,6 +212,16 @@ class Px4Runner(Runner):
except FileExistsError:
pass
def get_output_line(self) -> Optional[str]:
line = super().get_output_line()
if line is not None:
# colorize warnings and errors
if 'ERROR' in line:
line = colorize(line, color.RED)
elif 'WARN' in line:
line = colorize(line, color.YELLOW)
return line
class GzserverRunner(Runner):
def __init__(self,
@ -252,6 +262,13 @@ class GzserverRunner(Runner):
.format(timeout_s))
return False
def get_output_line(self) -> Optional[str]:
line = super().get_output_line()
# Some gazebo versions don't seem to reset the color, so always add a RESET
if line is not None:
line = line + color.RESET.value
return line
class GzmodelspawnRunner(Runner):
def __init__(self,
@ -333,7 +350,7 @@ class GzclientRunner(Runner):
self.args = ["--verbose"]
class TestRunner(Runner):
class TestRunnerMavsdk(Runner):
def __init__(self,
workspace_dir: str,
log_dir: str,
@ -355,3 +372,30 @@ class TestRunner(Runner):
"--url", mavlink_connection,
"--speed-factor", str(speed_factor),
case]
class TestRunnerRos(Runner):
def __init__(self,
workspace_dir: str,
log_dir: str,
model: str,
case: str,
verbose: bool,
ros_package_build_dir: str):
super().__init__(log_dir, model, case, verbose)
self.name = "integration_tests"
self.cwd = workspace_dir
self.cmd = "nice"
self.args = ["-5",
os.path.join(
ros_package_build_dir,
"integration_tests"),
"--gtest_filter="+case, "--gtest_color=yes"]
def get_output_line(self) -> Optional[str]:
line = super().get_output_line()
if line is not None:
# colorize assertion failures & errors
if 'Failure' in line or '[ERROR]' in line or '[FATAL]' in line:
line = colorize(line, color.RED)
return line

View File

@ -0,0 +1,569 @@
import datetime
import fnmatch
import math
import os
import re
import sys
import time
from typing import Any, Dict, List, NoReturn, TextIO, Optional
from types import FrameType
from . import process_helper as ph
from .logger_helper import color, colorize
try:
import requests
except ImportError as e:
print("Failed to import requests: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user requests")
print("")
sys.exit(1)
class TesterInterface:
def query_test_cases(self, build_dir: str, filter: str) -> List[str]:
"""Get a list of test cases that match a given filter"""
raise NotImplementedError
def create_test_runner(self,
workspace_dir: str,
log_dir: str,
model: str,
case: str,
config: Dict[str, Any],
speed_factor: float,
verbose: bool,
build_dir: str) -> ph.Runner:
"""Instantiate a Runner that starts the test executable"""
raise NotImplementedError
def rootfs_base_dirname(self) -> str:
"""Get a directory name to be used as rootfs dir in PX4 inside the build directory"""
raise NotImplementedError
class Tester:
def __init__(self,
config: Dict[str, Any],
iterations: int,
abort_early: bool,
speed_factor: float,
model: str,
case: str,
debugger: str,
log_dir: str,
gui: bool,
verbose: bool,
upload: bool,
build_dir: str,
tester_interface: TesterInterface):
self.config = config
self.build_dir = build_dir
self.active_runners: List[ph.Runner]
self.iterations = iterations
self.abort_early = abort_early
self.speed_factor = speed_factor
self.debugger = debugger
self.log_dir = log_dir
self.gui = gui
self.verbose = verbose
self.upload = upload
self.start_time = datetime.datetime.now()
self.log_fd: Any[TextIO] = None
self.tester_interface = tester_interface
self.tests = self.determine_tests(config['tests'], model, case)
self.active_runners = []
@staticmethod
def wildcard_match(pattern: str, potential_match: str) -> bool:
return fnmatch.fnmatchcase(potential_match, pattern)
@staticmethod
def plural_s(n_items: int) -> str:
if n_items > 1:
return "s"
else:
return ""
def determine_tests(self,
tests: List[Dict[str, Any]],
model: str,
case: str) -> List[Dict[str, Any]]:
for test in tests:
test['selected'] = (model == 'all' or model == test['model'])
test['cases'] = dict.fromkeys(
self.tester_interface.query_test_cases(self.build_dir, test['test_filter']))
for key in test['cases'].keys():
test['cases'][key] = {
'selected': (test['selected'] and
(case == 'all' or
self.wildcard_match(case, key)))}
return tests
def run(self) -> bool:
self.show_plans()
self.prepare_for_results()
self.run_tests()
self.show_detailed_results()
self.show_overall_result()
return self.was_overall_pass()
def show_plans(self) -> None:
print()
print(colorize(
"About to run {} test case{} for {} selected model{} "
"({} iteration{}):".
format(self.num_cases(), self.plural_s(self.num_cases()),
self.num_models(), self.plural_s(self.num_models()),
self.iterations, self.plural_s(self.iterations)),
color.BOLD))
for test in self.tests:
if not test['selected']:
print(colorize(colorize(
" - {} (not selected)".format(test['model']),
color.BOLD), color.GRAY))
continue
print(colorize(" - {}:".format(test['model']), color.BOLD))
for key, case in test['cases'].items():
if case['selected']:
print(" - '{}'".format(key))
else:
print(colorize(" - '{}' (not selected)".format(key),
color.GRAY))
print()
def num_models(self) -> int:
return sum(map(
lambda test: 1 if test['selected'] else 0,
self.tests))
def num_cases(self) -> int:
n_cases = 0
for test in self.tests:
if not test['selected']:
continue
n_cases += self.num_cases_for_test(test)
return n_cases
def num_cases_for_test(self, test: Dict[str, Any]) -> int:
n_cases = 0
for case in test['cases'].values():
if not case['selected']:
continue
n_cases += 1
return n_cases
def prepare_for_results(self) -> None:
for test in self.tests:
if not test['selected']:
continue
for key, value in test['cases'].items():
if not value['selected']:
continue
value['results'] = []
def run_tests(self) -> None:
for iteration in range(self.iterations):
if self.iterations > 1:
print(colorize("%%% Test iteration: {} / {}".
format(iteration + 1, self.iterations), color.BOLD))
success = self.run_iteration(iteration)
if not success:
if self.abort_early:
break
def run_iteration(self, iteration: int) -> bool:
iteration_success = True
for test in self.tests:
if not test['selected']:
continue
print(colorize(
"==> Running tests for {}".format(test['model']),
color.BOLD))
test_i = 0
for key, case_value in test['cases'].items():
if not case_value['selected']:
continue
print("--> Test case {} of {}: '{}' running ...".
format(test_i + 1,
self.num_cases_for_test(test),
key))
log_dir = self.get_log_dir(iteration, test['model'], key)
if self.verbose:
print("Creating log directory: {}"
.format(log_dir))
os.makedirs(log_dir, exist_ok=True)
was_success = self.run_test_case(test, key, log_dir)
print("--- Test case {} of {}: '{}' {}."
.format(test_i + 1,
self.num_cases_for_test(test),
key,
colorize("succeeded", color.GREEN)
if was_success
else colorize("failed", color.RED)))
if not was_success:
iteration_success = False
if not was_success and self.abort_early:
print("Aborting early")
return False
test_i += 1
return iteration_success
def get_log_dir(self, iteration: int, model: str, case: str) -> str:
date_and_time = self.start_time.strftime("%Y-%m-%dT%H-%M-%SZ")
foldername = os.path.join(self.log_dir, date_and_time)
if self.iterations > 1:
# Use as many zeros in foldername as required.
digits = math.floor(math.log10(self.iterations)) + 1
foldername = os.path.join(foldername,
str(iteration + 1).zfill(digits))
foldername = os.path.join(foldername, model)
foldername = os.path.join(foldername, case.replace(" ", "_"))
return foldername
def get_max_speed_factor(self, test: Dict[str, Any]) -> float:
speed_factor = self.speed_factor
if "max_speed_factor" in test:
speed_factor = min(float(speed_factor), test["max_speed_factor"])
return speed_factor
def run_test_case(self, test: Dict[str, Any],
case: str, log_dir: str) -> bool:
logfile_path = self.determine_logfile_path(log_dir, 'combined')
self.start_combined_log(logfile_path)
self.start_runners(log_dir, test, case)
test_timeout_s = test['timeout_min'] * 60
while self.active_runners[-1].time_elapsed_s() < test_timeout_s:
returncode = self.active_runners[-1].poll()
self.collect_runner_output()
if returncode is not None:
is_success = (returncode == 0)
break
else:
print(colorize(
"Test timeout of {} mins triggered!".
format(test['timeout_min']),
color.BOLD))
is_success = False
self.stop_runners()
# Collect what was left in output buffers.
self.collect_runner_output()
self.stop_combined_log()
result = {'success': is_success,
'logfiles': [runner.get_log_filename()
for runner in self.active_runners]}
test['cases'][case]['results'].append(result)
if not is_success:
if not self.verbose:
print(self.get_combined_log(logfile_path))
print("Logfiles: ")
print(" - {}".format(logfile_path))
for runner in self.active_runners:
print(" - {}".format(runner.get_log_filename()))
if self.upload:
ulog_file = self.parse_for_ulog_file(
self.get_combined_log(logfile_path))
self.upload_log(ulog_file, test['model'], case, is_success)
return is_success
def start_runners(self,
log_dir: str,
test: Dict[str, Any],
case: str) -> None:
self.active_runners = []
if self.config['mode'] == 'sitl':
if self.config['simulator'] == 'gazebo':
# Use RegEx to extract worldname.world from case name
match = re.search(r'\((.*?\.world)\)', case)
if match:
world_name = match.group(1)
else:
world_name = 'empty.world'
gzserver_runner = ph.GzserverRunner(
os.getcwd(),
log_dir,
test['vehicle'],
case,
self.get_max_speed_factor(test),
self.verbose,
self.build_dir,
world_name)
self.active_runners.append(gzserver_runner)
gzmodelspawn_runner = ph.GzmodelspawnRunner(
os.getcwd(),
log_dir,
test['vehicle'],
case,
self.verbose,
self.build_dir)
self.active_runners.append(gzmodelspawn_runner)
if self.gui:
gzclient_runner = ph.GzclientRunner(
os.getcwd(),
log_dir,
test['model'],
case,
self.verbose)
self.active_runners.append(gzclient_runner)
# We must start the PX4 instance at the end, as starting
# it in the beginning, then connecting Gazebo server freaks
# out the PX4 (it needs to have data coming in when started),
# and can lead to EKF to freak out, or the instance itself
# to die unexpectedly.
px4_runner = ph.Px4Runner(
os.getcwd(),
log_dir,
test['model'],
case,
self.get_max_speed_factor(test),
self.debugger,
self.verbose,
self.build_dir,
self.tester_interface.rootfs_base_dirname())
for env_key in test.get('env', []):
px4_runner.env[env_key] = str(test['env'][env_key])
self.active_runners.append(px4_runner)
self.active_runners.append(self.tester_interface.create_test_runner(
os.getcwd(),
log_dir,
test['model'],
case,
self.config,
self.speed_factor,
self.verbose,
self.build_dir
))
abort = False
for runner in self.active_runners:
runner.set_log_filename(
self.determine_logfile_path(log_dir, runner.name))
if not self.try_to_run_several_times(runner):
abort = True
break
if abort:
print("Could not start runner: {}".format(runner.name))
self.collect_runner_output()
self.stop_combined_log()
self.stop_runners()
sys.exit(1)
def try_to_run_several_times(self, runner: ph.Runner) -> bool:
for _ in range(3):
runner.start()
if runner.has_started_ok():
return True
else:
runner.stop()
time.sleep(1)
return False
def stop_runners(self) -> None:
for runner in self.active_runners:
runner.stop()
def collect_runner_output(self) -> None:
for runner in self.active_runners:
while True:
line = runner.get_output_line()
if not line:
break
self.add_to_combined_log(line)
if self.verbose:
print(line, end="")
def parse_for_ulog_file(self, log: str) -> Optional[str]:
match = "[logger] Opened full log file: ./"
for line in log.splitlines():
found = line.find(match)
if found != -1:
return os.path.join(os.getcwd(), self.build_dir,
self.tester_interface.rootfs_base_dirname(),
"rootfs",
line[found + len(match):])
return None
def upload_log(self, ulog_path: Optional[str],
model: str, case: str, success: bool) -> None:
if not ulog_path:
print(" Could not find ulog log file to upload")
return
if not os.getenv('GITHUB_RUN_ID'):
print(" Upload only implemented for GitHub Actions CI")
return
print(" Uploading logfile '{}' ...".format(ulog_path))
server = "https://logs.px4.io"
result_str = "passing" if success else "failing"
payload = {
"type": "flightreport",
"description": "SITL integration test - {}: '{}' -> {}"
.format(model, case, result_str),
"feedback":
"{}/{}/actions/runs/{}"
.format(os.getenv("GITHUB_SERVER_URL"),
os.getenv("GITHUB_REPOSITORY"),
os.getenv("GITHUB_RUN_ID")),
"email": "",
"source": "CI",
"videoUrl": "",
"rating": "notset",
"windSpeed": -1,
"public": "true"
}
with open(ulog_path, 'rb') as f:
r = requests.post(server + "/upload",
data=payload,
files={'filearg': f},
allow_redirects=False)
if r.status_code == 302: # redirect
if 'Location' in r.headers:
plot_url = r.headers['Location']
if len(plot_url) > 0 and plot_url[0] == '/':
plot_url = server + plot_url
print(" Uploaded to: " + plot_url)
else:
print(" Upload failed with status_code: {}"
.format(r.status_code))
def start_combined_log(self, filename: str) -> None:
self.log_fd = open(filename, 'w')
def stop_combined_log(self) -> None:
if self.log_fd:
self.log_fd.close()
def add_to_combined_log(self, output: str) -> None:
self.log_fd.write(output)
def get_combined_log(self, filename: str) -> str:
with open(filename, 'r') as f:
return f.read()
@staticmethod
def determine_logfile_path(log_dir: str, desc: str) -> str:
return os.path.join(log_dir, "log-{}.log".format(desc))
def show_detailed_results(self) -> None:
print()
print(colorize("Results:", color.BOLD))
for test in self.tests:
if not test['selected']:
print(colorize(colorize(
" - {} (not selected)".
format(test['model']), color.BOLD), color.GRAY))
continue
else:
print(colorize(" - {}:".format(test['model']), color.BOLD))
for name, case in test['cases'].items():
if not case['selected']:
continue
print(" - '{}': ".format(name), end="")
n_succeeded = [result['success']
for result in case['results']].count(True)
n_failed = [result['success']
for result in case['results']].count(False)
n_cancelled = self.iterations - len(case['results'])
notes = [
self.note_if_any(
colorize("{}succeeded", color.GREEN),
n_succeeded, self.iterations),
self.note_if_any(
colorize("{}failed", color.RED),
n_failed, self.iterations),
self.note_if_any(
colorize("{}cancelled", color.GRAY),
n_cancelled, self.iterations)]
notes_without_none = list(filter(None, notes))
print(", ".join(notes_without_none))
def was_overall_pass(self) -> bool:
for test in self.tests:
if not test['selected']:
continue
for case in test['cases'].values():
if not case['selected']:
continue
for result in case['results']:
if result['success'] is not True:
return False
return True
def show_overall_result(self) -> None:
print(colorize(
"Overall result: {}".format(
colorize("PASS", color.GREEN)
if self.was_overall_pass()
else colorize("FAIL", color.RED)), color.BOLD))
@staticmethod
def note_if_any(text_to_format: str, n: int, total: int) -> Optional[str]:
assert not n < 0
if n == 0:
return None
elif n == 1 and total == 1:
return text_to_format.format("")
else:
return text_to_format.format(str(n) + " ")
def sigint_handler(self, sig: int, frame: Optional[FrameType]) \
-> NoReturn:
print("Received SIGINT")
print("Stopping all processes ...")
self.stop_runners()
self.stop_combined_log()
print("Stopping all processes done.")
self.show_detailed_results()
sys.exit(-sig)

View File

@ -1,35 +1,55 @@
#!/usr/bin/env python3
import argparse
import datetime
import fnmatch
import json
import math
import os
import psutil # type: ignore
import re
import signal
import subprocess
import sys
import time
from logger_helper import color, colorize
import process_helper as ph
from typing import Any, Dict, List, NoReturn, TextIO, Optional
from types import FrameType
from integration_test_runner import test_runner, logger_helper
import integration_test_runner.process_helper as ph
from typing import Any, Dict, List, NoReturn
try:
import requests
except ImportError as e:
print("Failed to import requests: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user requests")
print("")
sys.exit(1)
class TesterInterfaceMavsdk(test_runner.TesterInterface):
def query_test_cases(self, build_dir: str, filter: str) -> List[str]:
cmd = os.path.join(build_dir, 'mavsdk_tests/mavsdk_tests')
args = ["--list-test-names-only", filter]
p = subprocess.Popen(
[cmd] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
assert p.stdout is not None
cases = str(p.stdout.read().decode("utf-8")).strip().split('\n')
return cases
def create_test_runner(self,
workspace_dir: str,
log_dir: str,
model: str,
case: str,
config: Dict[str, Any],
speed_factor: float,
verbose: bool,
build_dir: str) -> ph.Runner:
return ph.TestRunnerMavsdk(
workspace_dir,
log_dir,
model,
case,
config['mavlink_connection'],
speed_factor,
verbose,
build_dir)
def rootfs_base_dirname(self) -> str:
return "tmp_mavsdk_tests"
def main() -> NoReturn:
parser = argparse.ArgumentParser()
parser.add_argument("--log-dir",
help="Directory for log files", default="logs")
@ -50,6 +70,8 @@ def main() -> NoReturn:
help="choice from valgrind, callgrind, gdb, lldb")
parser.add_argument("--upload", default=False, action='store_true',
help="Upload logs to logs.px4.io")
parser.add_argument("--force-color", default=False, action='store_true',
help="Force colorized output")
parser.add_argument("--verbose", default=False, action='store_true',
help="enable more verbose output")
parser.add_argument("config_file", help="JSON config file to use")
@ -58,6 +80,9 @@ def main() -> NoReturn:
help="relative path where the built files are stored")
args = parser.parse_args()
if args.force_color:
logger_helper.force_color = True
with open(args.config_file) as json_file:
config = json.load(json_file)
@ -73,7 +98,8 @@ def main() -> NoReturn:
print("Creating directory: {}".format(args.log_dir))
os.makedirs(args.log_dir, exist_ok=True)
tester = Tester(
tester_interface = TesterInterfaceMavsdk()
tester = test_runner.Tester(
config,
args.iterations,
args.abort_early,
@ -85,7 +111,8 @@ def main() -> NoReturn:
args.gui,
args.verbose,
args.upload,
args.build_dir
args.build_dir,
tester_interface
)
signal.signal(signal.SIGINT, tester.sigint_handler)
@ -134,538 +161,5 @@ def is_everything_ready(config: Dict[str, str], build_dir: str) -> bool:
return result
class Tester:
def __init__(self,
config: Dict[str, Any],
iterations: int,
abort_early: bool,
speed_factor: float,
model: str,
case: str,
debugger: str,
log_dir: str,
gui: bool,
verbose: bool,
upload: bool,
build_dir: str):
self.config = config
self.build_dir = build_dir
self.active_runners: List[ph.Runner]
self.iterations = iterations
self.abort_early = abort_early
self.tests = self.determine_tests(config['tests'], model, case)
self.speed_factor = speed_factor
self.debugger = debugger
self.log_dir = log_dir
self.gui = gui
self.verbose = verbose
self.upload = upload
self.start_time = datetime.datetime.now()
self.log_fd: Any[TextIO] = None
@staticmethod
def wildcard_match(pattern: str, potential_match: str) -> bool:
return fnmatch.fnmatchcase(potential_match, pattern)
@staticmethod
def plural_s(n_items: int) -> str:
if n_items > 1:
return "s"
else:
return ""
@staticmethod
def query_test_cases(build_dir: str, filter: str) -> List[str]:
cmd = os.path.join(build_dir, 'mavsdk_tests/mavsdk_tests')
args = ["--list-test-names-only", filter]
p = subprocess.Popen(
[cmd] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
assert p.stdout is not None
cases = str(p.stdout.read().decode("utf-8")).strip().split('\n')
return cases
def determine_tests(self,
tests: List[Dict[str, Any]],
model: str,
case: str) -> List[Dict[str, Any]]:
for test in tests:
test['selected'] = (model == 'all' or model == test['model'])
test['cases'] = dict.fromkeys(
self.query_test_cases(self.build_dir, test['test_filter']))
for key in test['cases'].keys():
test['cases'][key] = {
'selected': (test['selected'] and
(case == 'all' or
self.wildcard_match(case, key)))}
return tests
def run(self) -> bool:
self.show_plans()
self.prepare_for_results()
self.run_tests()
self.show_detailed_results()
self.show_overall_result()
return self.was_overall_pass()
def show_plans(self) -> None:
print()
print(colorize(
"About to run {} test case{} for {} selected model{} "
"({} iteration{}):".
format(self.num_cases(), self.plural_s(self.num_cases()),
self.num_models(), self.plural_s(self.num_models()),
self.iterations, self.plural_s(self.iterations)),
color.BOLD))
for test in self.tests:
if not test['selected']:
print(colorize(colorize(
" - {} (not selected)".format(test['model']),
color.BOLD), color.GRAY))
continue
print(colorize(" - {}:".format(test['model']), color.BOLD))
for key, case in test['cases'].items():
if case['selected']:
print(" - '{}'".format(key))
else:
print(colorize(" - '{}' (not selected)".format(key),
color.GRAY))
print()
def num_models(self) -> int:
return sum(map(
lambda test: 1 if test['selected'] else 0,
self.tests))
def num_cases(self) -> int:
n_cases = 0
for test in self.tests:
if not test['selected']:
continue
n_cases += self.num_cases_for_test(test)
return n_cases
def num_cases_for_test(self, test: Dict[str, Any]) -> int:
n_cases = 0
for case in test['cases'].values():
if not case['selected']:
continue
n_cases += 1
return n_cases
def prepare_for_results(self) -> None:
for test in self.tests:
if not test['selected']:
continue
for key, value in test['cases'].items():
if not value['selected']:
continue
value['results'] = []
def run_tests(self) -> None:
for iteration in range(self.iterations):
if self.iterations > 1:
print(colorize("%%% Test iteration: {} / {}".
format(iteration + 1, self.iterations), color.BOLD))
success = self.run_iteration(iteration)
if not success:
if self.abort_early:
break
def run_iteration(self, iteration: int) -> bool:
iteration_success = True
for test in self.tests:
if not test['selected']:
continue
print(colorize(
"==> Running tests for {}".format(test['model']),
color.BOLD))
test_i = 0
for key, case_value in test['cases'].items():
if not case_value['selected']:
continue
print("--> Test case {} of {}: '{}' running ...".
format(test_i+1,
self.num_cases_for_test(test),
key))
log_dir = self.get_log_dir(iteration, test['model'], key)
if self.verbose:
print("Creating log directory: {}"
.format(log_dir))
os.makedirs(log_dir, exist_ok=True)
was_success = self.run_test_case(test, key, log_dir)
print("--- Test case {} of {}: '{}' {}."
.format(test_i+1,
self.num_cases_for_test(test),
key,
colorize("succeeded", color.GREEN)
if was_success
else colorize("failed", color.RED)))
if not was_success:
iteration_success = False
if not was_success and self.abort_early:
print("Aborting early")
return False
test_i += 1
return iteration_success
def get_log_dir(self, iteration: int, model: str, case: str) -> str:
date_and_time = self.start_time.strftime("%Y-%m-%dT%H-%M-%SZ")
foldername = os.path.join(self.log_dir, date_and_time)
if self.iterations > 1:
# Use as many zeros in foldername as required.
digits = math.floor(math.log10(self.iterations)) + 1
foldername = os.path.join(foldername,
str(iteration + 1).zfill(digits))
foldername = os.path.join(foldername, model)
foldername = os.path.join(foldername, case.replace(" ", "_"))
return foldername
def get_max_speed_factor(self, test: Dict[str, Any]) -> float:
speed_factor = self.speed_factor
if "max_speed_factor" in test:
speed_factor = min(float(speed_factor), test["max_speed_factor"])
return speed_factor
def run_test_case(self, test: Dict[str, Any],
case: str, log_dir: str) -> bool:
logfile_path = self.determine_logfile_path(log_dir, 'combined')
self.start_combined_log(logfile_path)
self.start_runners(log_dir, test, case)
test_timeout_s = test['timeout_min']*60
while self.active_runners[-1].time_elapsed_s() < test_timeout_s:
returncode = self.active_runners[-1].poll()
self.collect_runner_output()
if returncode is not None:
is_success = (returncode == 0)
break
else:
print(colorize(
"Test timeout of {} mins triggered!".
format(test['timeout_min']),
color.BOLD))
is_success = False
self.stop_runners()
# Collect what was left in output buffers.
self.collect_runner_output()
self.stop_combined_log()
result = {'success': is_success,
'logfiles': [runner.get_log_filename()
for runner in self.active_runners]}
test['cases'][case]['results'].append(result)
if not is_success:
if not self.verbose:
print(self.get_combined_log(logfile_path))
print("Logfiles: ")
print(" - {}".format(logfile_path))
for runner in self.active_runners:
print(" - {}".format(runner.get_log_filename()))
if self.upload:
ulog_file = self.parse_for_ulog_file(
self.get_combined_log(logfile_path))
self.upload_log(ulog_file, test['model'], case, is_success)
return is_success
def start_runners(self,
log_dir: str,
test: Dict[str, Any],
case: str) -> None:
self.active_runners = []
if self.config['mode'] == 'sitl':
if self.config['simulator'] == 'gazebo':
# Use RegEx to extract worldname.world from case name
match = re.search(r'\((.*?\.world)\)', case)
if match:
world_name = match.group(1)
else:
world_name = 'empty.world'
gzserver_runner = ph.GzserverRunner(
os.getcwd(),
log_dir,
test['vehicle'],
case,
self.get_max_speed_factor(test),
self.verbose,
self.build_dir,
world_name)
self.active_runners.append(gzserver_runner)
gzmodelspawn_runner = ph.GzmodelspawnRunner(
os.getcwd(),
log_dir,
test['vehicle'],
case,
self.verbose,
self.build_dir)
self.active_runners.append(gzmodelspawn_runner)
if self.gui:
gzclient_runner = ph.GzclientRunner(
os.getcwd(),
log_dir,
test['model'],
case,
self.verbose)
self.active_runners.append(gzclient_runner)
# We must start the PX4 instance at the end, as starting
# it in the beginning, then connecting Gazebo server freaks
# out the PX4 (it needs to have data coming in when started),
# and can lead to EKF to freak out, or the instance itself
# to die unexpectedly.
px4_runner = ph.Px4Runner(
os.getcwd(),
log_dir,
test['model'],
case,
self.get_max_speed_factor(test),
self.debugger,
self.verbose,
self.build_dir)
for env_key in test.get('env', []):
px4_runner.env[env_key] = str(test['env'][env_key])
self.active_runners.append(px4_runner)
mavsdk_tests_runner = ph.TestRunner(
os.getcwd(),
log_dir,
test['model'],
case,
self.config['mavlink_connection'],
self.speed_factor,
self.verbose,
self.build_dir)
self.active_runners.append(mavsdk_tests_runner)
abort = False
for runner in self.active_runners:
runner.set_log_filename(
self.determine_logfile_path(log_dir, runner.name))
if not self.try_to_run_several_times(runner):
abort = True
break
if abort:
print("Could not start runner: {}".format(runner.name))
self.collect_runner_output()
self.stop_combined_log()
self.stop_runners()
sys.exit(1)
def try_to_run_several_times(self, runner: ph.Runner) -> bool:
for _ in range(3):
runner.start()
if runner.has_started_ok():
return True
else:
runner.stop()
time.sleep(1)
return False
def stop_runners(self) -> None:
for runner in self.active_runners:
runner.stop()
def collect_runner_output(self) -> None:
for runner in self.active_runners:
while True:
line = runner.get_output_line()
if not line:
break
self.add_to_combined_log(line)
if self.verbose:
print(line, end="")
def parse_for_ulog_file(self, log: str) -> Optional[str]:
match = "[logger] Opened full log file: ./"
for line in log.splitlines():
found = line.find(match)
if found != -1:
return os.path.join(os.getcwd(), self.build_dir,
"tmp_mavsdk_tests/rootfs",
line[found+len(match):])
return None
def upload_log(self, ulog_path: Optional[str],
model: str, case: str, success: bool) -> None:
if not ulog_path:
print(" Could not find ulog log file to upload")
return
if not os.getenv('GITHUB_RUN_ID'):
print(" Upload only implemented for GitHub Actions CI")
return
print(" Uploading logfile '{}' ...".format(ulog_path))
server = "https://logs.px4.io"
result_str = "passing" if success else "failing"
payload = {
"type": "flightreport",
"description": "SITL integration test - {}: '{}' -> {}"
.format(model, case, result_str),
"feedback":
"{}/{}/actions/runs/{}"
.format(os.getenv("GITHUB_SERVER_URL"),
os.getenv("GITHUB_REPOSITORY"),
os.getenv("GITHUB_RUN_ID")),
"email": "",
"source": "CI",
"videoUrl": "",
"rating": "notset",
"windSpeed": -1,
"public": "true"
}
with open(ulog_path, 'rb') as f:
r = requests.post(server + "/upload",
data=payload,
files={'filearg': f},
allow_redirects=False)
if r.status_code == 302: # redirect
if 'Location' in r.headers:
plot_url = r.headers['Location']
if len(plot_url) > 0 and plot_url[0] == '/':
plot_url = server + plot_url
print(" Uploaded to: " + plot_url)
else:
print(" Upload failed with status_code: {}"
.format(r.status_code))
def start_combined_log(self, filename: str) -> None:
self.log_fd = open(filename, 'w')
def stop_combined_log(self) -> None:
if self.log_fd:
self.log_fd.close()
def add_to_combined_log(self, output: str) -> None:
self.log_fd.write(output)
def get_combined_log(self, filename: str) -> str:
with open(filename, 'r') as f:
return f.read()
@staticmethod
def determine_logfile_path(log_dir: str, desc: str) -> str:
return os.path.join(log_dir, "log-{}.log".format(desc))
def show_detailed_results(self) -> None:
print()
print(colorize("Results:", color.BOLD))
for test in self.tests:
if not test['selected']:
print(colorize(colorize(
" - {} (not selected)".
format(test['model']), color.BOLD), color.GRAY))
continue
else:
print(colorize(" - {}:".format(test['model']), color.BOLD))
for name, case in test['cases'].items():
if not case['selected']:
continue
print(" - '{}': ".format(name), end="")
n_succeeded = [result['success']
for result in case['results']].count(True)
n_failed = [result['success']
for result in case['results']].count(False)
n_cancelled = self.iterations - len(case['results'])
notes = [
self.note_if_any(
colorize("{}succeeded", color.GREEN),
n_succeeded, self.iterations),
self.note_if_any(
colorize("{}failed", color.RED),
n_failed, self.iterations),
self.note_if_any(
colorize("{}cancelled", color.GRAY),
n_cancelled, self.iterations)]
notes_without_none = list(filter(None, notes))
print(", ".join(notes_without_none))
def was_overall_pass(self) -> bool:
for test in self.tests:
if not test['selected']:
continue
for case in test['cases'].values():
if not case['selected']:
continue
for result in case['results']:
if result['success'] is not True:
return False
return True
def show_overall_result(self) -> None:
print(colorize(
"Overall result: {}".format(
colorize("PASS", color.GREEN)
if self.was_overall_pass()
else colorize("FAIL", color.RED)), color.BOLD))
@staticmethod
def note_if_any(text_to_format: str, n: int, total: int) -> Optional[str]:
assert not n < 0
if n == 0:
return None
elif n == 1 and total == 1:
return text_to_format.format("")
else:
return text_to_format.format(str(n) + " ")
def sigint_handler(self, sig: int, frame: Optional[FrameType]) \
-> NoReturn:
print("Received SIGINT")
print("Stopping all processes ...")
self.stop_runners()
self.stop_combined_log()
print("Stopping all processes done.")
self.show_detailed_results()
sys.exit(-sig)
if __name__ == '__main__':
main()

252
test/ros_test_runner.py Executable file
View File

@ -0,0 +1,252 @@
#!/usr/bin/env python3
import argparse
import json
import os
import psutil # type: ignore
import signal
import subprocess
import sys
from mavsdk_tests.integration_test_runner import test_runner, process_helper as ph, logger_helper
from typing import Any, Dict, List, NoReturn
class MicroXrceAgent:
def __init__(self, verbose: bool):
self._verbose = verbose
# Potential binary names to check for
self._binary_names = ['MicroXRCEAgent', 'micro-xrce-dds-agent']
self._proc = None
def is_running(self) -> bool:
return any(is_running(name) for name in self._binary_names)
def start_process(self):
if self._verbose:
print('Starting micro-xrce-dds-agent')
assert self._proc is None
for name in self._binary_names:
try:
self._proc = subprocess.Popen([name] + 'udp4 -p 8888'.split())
except FileNotFoundError:
pass
else: # Process was started
break
if self._proc is None:
raise RuntimeError(f'Failed to start agent, tried these binaries: {self._binary_names}.\n'
'Make sure it is installed and available. '
'You can also run it manually before running this script')
def stop_process_if_started(self):
if self._proc is None:
return
if self._verbose:
print('Stopping micro-xrce-dds-agent')
self._proc.kill()
self._proc = None
class TesterInterfaceRos(test_runner.TesterInterface):
def __init__(self, ros_package_build_dir: str):
self._ros_package_build_dir = ros_package_build_dir
def query_test_cases(self, build_dir: str, filter: str) -> List[str]:
cmd = os.path.join(self._ros_package_build_dir, 'integration_tests')
args = ["--gtest_list_tests", "--gtest_filter=" + filter]
# Example output:
# Running main() from ros2_install/install/gtest_vendor/src/gtest_vendor/src/gtest_main.cc
# Tester.
# runModeTests
p = subprocess.Popen(
[cmd] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
assert p.stdout is not None
lines = str(p.stdout.read().decode("utf-8")).strip().split('\n')
if lines[0].startswith('Running main'):
del lines[0]
cases = []
test_suite_name = None
for line in lines:
if line.startswith(' '):
assert test_suite_name is not None
cases.append(test_suite_name + line.strip())
else:
test_suite_name = line
return cases
def create_test_runner(self,
workspace_dir: str,
log_dir: str,
model: str,
case: str,
config: Dict[str, Any],
speed_factor: float,
verbose: bool,
build_dir: str) -> ph.Runner:
return ph.TestRunnerRos(
workspace_dir,
log_dir,
model,
case,
verbose,
self._ros_package_build_dir)
def rootfs_base_dirname(self) -> str:
return "tmp_ros_tests"
def main() -> NoReturn:
parser = argparse.ArgumentParser()
parser.add_argument("--list-cases", action='store_true',
help="List available test cases")
parser.add_argument("--log-dir",
help="Directory for log files", default="logs")
parser.add_argument("--iterations", type=int, default=1,
help="how often to run all tests")
parser.add_argument("--abort-early", action='store_true',
help="abort on first unsuccessful test")
parser.add_argument("--gui", default=False, action='store_true',
help="display the visualization for a simulation")
parser.add_argument("--model", type=str, default='all',
help="only run tests for one model")
parser.add_argument("--case", type=str, default='all',
help="only run tests for one case "
"(or multiple cases with wildcard '*')")
parser.add_argument("--debugger", default="",
help="choice from valgrind, callgrind, gdb, lldb")
parser.add_argument("--upload", default=False, action='store_true',
help="Upload logs to logs.px4.io")
parser.add_argument("--force-color", default=False, action='store_true',
help="Force colorized output")
parser.add_argument("--verbose", default=False, action='store_true',
help="enable more verbose output")
parser.add_argument("--config-file", help="JSON config file to use",
default="test/ros_tests/config.json")
parser.add_argument("--build-dir", type=str,
default='build/px4_sitl_default/',
help="relative path where the built files are stored")
parser.add_argument("--px4-ros2-interface-lib-build-dir", type=str,
default=None,
help="path which contains the integration_tests binary. "
"If not provided, it is determined via 'ros2 pkg'")
args = parser.parse_args()
if args.force_color:
logger_helper.force_color = True
ros_package_build_dir = args.px4_ros2_interface_lib_build_dir
if ros_package_build_dir is None:
ros_package_build_dir = lookup_px4_ros2_cpp_build_dir()
if args.verbose:
print(f'px4_ros2_cpp build dir: {ros_package_build_dir}')
tester_interface = TesterInterfaceRos(ros_package_build_dir)
if args.list_cases:
for case in tester_interface.query_test_cases(args.build_dir, '*'):
print(f'{case}')
sys.exit(0)
with open(args.config_file) as json_file:
config = json.load(json_file)
if config["mode"] != "sitl" and args.gui:
print("--gui is not compatible with the mode '{}'"
.format(config["mode"]))
sys.exit(1)
if not is_everything_ready(config, args.build_dir):
sys.exit(1)
if args.verbose:
print("Creating directory: {}".format(args.log_dir))
os.makedirs(args.log_dir, exist_ok=True)
speed_factor = 1 # Not (yet) supported
tester = test_runner.Tester(
config,
args.iterations,
args.abort_early,
speed_factor,
args.model,
args.case,
args.debugger,
args.log_dir,
args.gui,
args.verbose,
args.upload,
args.build_dir,
tester_interface
)
signal.signal(signal.SIGINT, tester.sigint_handler)
# Automatically start & stop the XRCE Agent if not running already
micro_xrce_agent = MicroXrceAgent(args.verbose)
if not micro_xrce_agent.is_running():
micro_xrce_agent.start_process()
try:
result = tester.run()
finally:
micro_xrce_agent.stop_process_if_started()
sys.exit(0 if result else 1)
def is_running(process_name: str) -> bool:
for proc in psutil.process_iter(attrs=['name']):
if proc.info['name'] == process_name:
return True
return False
def is_everything_ready(config: Dict[str, str], build_dir: str) -> bool:
result = True
if config['mode'] == 'sitl':
if is_running('px4'):
print("px4 process already running\n"
"run `killall px4` and try again")
result = False
if not os.path.isfile(os.path.join(build_dir, 'bin/px4')):
print("PX4 SITL is not built\n"
"run `DONT_RUN=1 make px4_sitl gazebo` or "
"`DONT_RUN=1 make px4_sitl_default gazebo`")
result = False
if config['simulator'] == 'gazebo':
if is_running('gzserver'):
print("gzserver process already running\n"
"run `killall gzserver` and try again")
result = False
if is_running('gzclient'):
print("gzclient process already running\n"
"run `killall gzclient` and try again")
result = False
return result
def lookup_px4_ros2_cpp_build_dir():
"""Get the ROS2 build directory for px4_ros2_cpp"""
try:
p = subprocess.Popen(
'ros2 pkg prefix px4_ros2_cpp'.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except FileNotFoundError:
print('Error: command "ros2" not found. Did you source the ros workspace?')
sys.exit(1)
assert p.stdout is not None
lines = str(p.stdout.read().decode("utf-8")).strip().split('\n')
assert len(lines) == 1
return os.path.join(lines[0], '../../build/px4_ros2_cpp')
if __name__ == '__main__':
main()

View File

@ -0,0 +1,31 @@
{
"mode": "sitl",
"simulator": "gazebo",
"tests":
[
{
"model": "iris",
"vehicle": "iris",
"test_filter": "ModesTest.*",
"timeout_min": 10
},
{
"model": "iris",
"vehicle": "iris",
"test_filter": "LocalPositionInterfaceTest.*",
"timeout_min": 10,
"env": {
"PX4_PARAM_EKF2_EV_CTRL": 15
}
},
{
"model": "iris",
"vehicle": "iris",
"test_filter": "GlobalPositionInterfaceTest.*",
"timeout_min": 10,
"env": {
"PX4_PARAM_EKF2_AGP_CTRL": 1
}
}
]
}