2018-01-05 02:19:51 -04:00
|
|
|
/*
|
|
|
|
* This file is free software: you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation, either version 3 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This file is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
|
|
|
* See the GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program. If not, see <http://www.gnu.org/licenses/>.
|
2019-10-20 10:31:12 -03:00
|
|
|
*
|
2018-01-05 02:19:51 -04:00
|
|
|
* Code by Andrew Tridgell and Siddharth Bharat Purohit
|
|
|
|
*/
|
2022-08-18 03:45:30 -03:00
|
|
|
|
|
|
|
#include <AP_HAL/AP_HAL_Boards.h>
|
|
|
|
|
|
|
|
#ifndef HAL_SCHEDULER_ENABLED
|
|
|
|
#define HAL_SCHEDULER_ENABLED 1
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if HAL_SCHEDULER_ENABLED
|
|
|
|
|
2018-01-05 02:19:51 -04:00
|
|
|
#include <AP_HAL/AP_HAL.h>
|
|
|
|
|
2022-02-20 23:44:56 -04:00
|
|
|
#include <hal.h>
|
2018-01-05 02:19:51 -04:00
|
|
|
#include "AP_HAL_ChibiOS.h"
|
|
|
|
#include "Scheduler.h"
|
2018-01-28 14:31:21 -04:00
|
|
|
#include "Util.h"
|
2020-09-21 05:29:40 -03:00
|
|
|
#include "GPIO.h"
|
2018-01-05 02:19:51 -04:00
|
|
|
|
|
|
|
#include <AP_HAL_ChibiOS/UARTDriver.h>
|
|
|
|
#include <AP_HAL_ChibiOS/AnalogIn.h>
|
|
|
|
#include <AP_HAL_ChibiOS/Storage.h>
|
|
|
|
#include <AP_HAL_ChibiOS/RCOutput.h>
|
|
|
|
#include <AP_HAL_ChibiOS/RCInput.h>
|
2020-05-31 09:17:00 -03:00
|
|
|
#include <AP_HAL_ChibiOS/CANIface.h>
|
2019-05-09 08:15:58 -03:00
|
|
|
#include <AP_InternalError/AP_InternalError.h>
|
2018-01-05 02:19:51 -04:00
|
|
|
|
2018-06-25 06:50:30 -03:00
|
|
|
#if CH_CFG_USE_DYNAMIC == TRUE
|
|
|
|
|
2019-01-18 00:23:42 -04:00
|
|
|
#include <AP_Logger/AP_Logger.h>
|
2024-03-16 08:59:43 -03:00
|
|
|
#include <AP_Math/AP_Math.h>
|
2018-01-05 02:19:51 -04:00
|
|
|
#include <AP_Scheduler/AP_Scheduler.h>
|
|
|
|
#include <AP_BoardConfig/AP_BoardConfig.h>
|
2018-06-27 05:46:34 -03:00
|
|
|
#include "hwdef/common/stm32_util.h"
|
2020-12-05 19:29:31 -04:00
|
|
|
#include "hwdef/common/flash.h"
|
2019-04-11 08:12:03 -03:00
|
|
|
#include "hwdef/common/watchdog.h"
|
2020-10-23 22:26:44 -03:00
|
|
|
#include <AP_Filesystem/AP_Filesystem.h>
|
2018-01-28 21:43:55 -04:00
|
|
|
#include "shared_dma.h"
|
2021-09-04 08:59:15 -03:00
|
|
|
#include <AP_Common/ExpandingString.h>
|
2023-06-15 20:54:23 -03:00
|
|
|
#include <GCS_MAVLink/GCS.h>
|
2018-01-05 02:19:51 -04:00
|
|
|
|
2018-10-29 18:51:37 -03:00
|
|
|
#if HAL_WITH_IO_MCU
|
|
|
|
#include <AP_IOMCU/AP_IOMCU.h>
|
|
|
|
extern AP_IOMCU iomcu;
|
|
|
|
#endif
|
|
|
|
|
2018-01-05 02:19:51 -04:00
|
|
|
using namespace ChibiOS;
|
|
|
|
|
2024-04-01 03:29:31 -03:00
|
|
|
#ifndef HAL_RCIN_THREAD_ENABLED
|
|
|
|
#define HAL_RCIN_THREAD_ENABLED 1
|
|
|
|
#endif
|
|
|
|
|
2018-01-05 02:19:51 -04:00
|
|
|
extern const AP_HAL::HAL& hal;
|
2018-10-05 21:32:00 -03:00
|
|
|
#ifndef HAL_NO_TIMER_THREAD
|
2018-08-29 10:14:45 -03:00
|
|
|
THD_WORKING_AREA(_timer_thread_wa, TIMER_THD_WA_SIZE);
|
2018-10-05 21:32:00 -03:00
|
|
|
#endif
|
2020-12-05 15:16:27 -04:00
|
|
|
#ifndef HAL_NO_RCOUT_THREAD
|
|
|
|
THD_WORKING_AREA(_rcout_thread_wa, RCOUT_THD_WA_SIZE);
|
|
|
|
#endif
|
2024-04-01 03:29:31 -03:00
|
|
|
#if HAL_RCIN_THREAD_ENABLED
|
2018-08-29 10:14:45 -03:00
|
|
|
THD_WORKING_AREA(_rcin_thread_wa, RCIN_THD_WA_SIZE);
|
2018-10-05 21:32:00 -03:00
|
|
|
#endif
|
2018-08-29 10:14:45 -03:00
|
|
|
#ifndef HAL_USE_EMPTY_IO
|
|
|
|
THD_WORKING_AREA(_io_thread_wa, IO_THD_WA_SIZE);
|
|
|
|
#endif
|
|
|
|
#ifndef HAL_USE_EMPTY_STORAGE
|
|
|
|
THD_WORKING_AREA(_storage_thread_wa, STORAGE_THD_WA_SIZE);
|
|
|
|
#endif
|
2019-05-09 08:15:58 -03:00
|
|
|
#ifndef HAL_NO_MONITOR_THREAD
|
|
|
|
THD_WORKING_AREA(_monitor_thread_wa, MONITOR_THD_WA_SIZE);
|
|
|
|
#endif
|
|
|
|
|
2023-08-15 23:18:52 -03:00
|
|
|
// while the vehicle is being initialised we expect there to be random
|
|
|
|
// delays which may exceed the watchdog timeout. By default, We pat
|
|
|
|
// the watchdog in the timer thread during setup to avoid the watchdog:
|
|
|
|
#ifndef AP_HAL_CHIBIOS_IN_EXPECTED_DELAY_WHEN_NOT_INITIALISED
|
|
|
|
#define AP_HAL_CHIBIOS_IN_EXPECTED_DELAY_WHEN_NOT_INITIALISED 1
|
|
|
|
#endif
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
// constructor: all member state is initialised by in-class/default
// initialisers; thread and semaphore setup happens later in init()
Scheduler::Scheduler()
{
}
|
2018-01-05 02:19:51 -04:00
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  initialise the scheduler: create the semaphores protecting the
  registered timer/IO callback tables, then start the core HAL
  threads (monitor, timer, rcout, rcin, io, storage). Each thread can
  be compiled out with its corresponding HAL_NO_* / HAL_USE_EMPTY_* /
  HAL_RCIN_THREAD_ENABLED define.
 */
void Scheduler::init()
{
    // binary semaphores guarding _timer_proc[] and _io_proc[]
    chBSemObjectInit(&_timer_semaphore, false);
    chBSemObjectInit(&_io_semaphore, false);

#ifndef HAL_NO_MONITOR_THREAD
    // setup the monitor thread - this is used to detect software lockups
    _monitor_thread_ctx = chThdCreateStatic(_monitor_thread_wa,
                     sizeof(_monitor_thread_wa),
                     APM_MONITOR_PRIORITY,        /* Initial priority.    */
                     _monitor_thread,             /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif

#ifndef HAL_NO_TIMER_THREAD
    // setup the timer thread - this will call tasks at 1kHz
    _timer_thread_ctx = chThdCreateStatic(_timer_thread_wa,
                     sizeof(_timer_thread_wa),
                     APM_TIMER_PRIORITY,          /* Initial priority.    */
                     _timer_thread,               /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif

#ifndef HAL_NO_RCOUT_THREAD
    // setup the RCOUT thread - this will call tasks at 1kHz
    _rcout_thread_ctx = chThdCreateStatic(_rcout_thread_wa,
                     sizeof(_rcout_thread_wa),
                     APM_RCOUT_PRIORITY,          /* Initial priority.    */
                     _rcout_thread,               /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif

#if HAL_RCIN_THREAD_ENABLED
    // setup the RCIN thread - this will call tasks at 1kHz
    _rcin_thread_ctx = chThdCreateStatic(_rcin_thread_wa,
                     sizeof(_rcin_thread_wa),
                     APM_RCIN_PRIORITY,           /* Initial priority.    */
                     _rcin_thread,                /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif

#ifndef HAL_USE_EMPTY_IO
    // the IO thread runs at lower priority
    _io_thread_ctx = chThdCreateStatic(_io_thread_wa,
                     sizeof(_io_thread_wa),
                     APM_IO_PRIORITY,             /* Initial priority.    */
                     _io_thread,                  /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif

#ifndef HAL_USE_EMPTY_STORAGE
    // the storage thread runs at just above IO priority
    _storage_thread_ctx = chThdCreateStatic(_storage_thread_wa,
                     sizeof(_storage_thread_wa),
                     APM_STORAGE_PRIORITY,        /* Initial priority.    */
                     _storage_thread,             /* Thread function.     */
                     this);                       /* Thread parameter.    */
#endif
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  sleep the calling thread for the given number of microseconds.
  Zero-length and zero-tick sleeps are avoided because ChibiOS hard
  faults on them; the sleep is clamped to the RTOS-supported range.
 */
void Scheduler::delay_microseconds(uint16_t usec)
{
    // ChibiOS faults on a 0us sleep, so return immediately
    if (usec == 0) {
        return;
    }
    // convert microseconds to system ticks; a zero-tick sleep also
    // hard faults, so round up to at least one tick
    uint32_t ticks = chTimeUS2I(usec);
    if (ticks == 0) {
        ticks = 1;
    }
    // never exceed the maximum interval the kernel accepts
    if (ticks > TIME_MAX_INTERVAL) {
        ticks = TIME_MAX_INTERVAL;
    }
    // suspend the thread, sleeping for no less than the tick-less
    // timer's minimum delta
    chThdSleep(MAX(ticks, CH_CFG_ST_TIMEDELTA));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
wrapper around sem_post that boosts main thread priority
|
|
|
|
*/
|
|
|
|
// raise the calling (main) thread to the boosted priority; a no-op
// on boards where the boost priority equals the normal main priority
static void set_high_priority()
{
#if APM_MAIN_PRIORITY_BOOST != APM_MAIN_PRIORITY
    hal_chibios_set_priority(APM_MAIN_PRIORITY_BOOST);
#endif
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
return the main thread to normal priority
|
|
|
|
*/
|
2018-05-04 05:30:25 -03:00
|
|
|
/*
  return the main thread to normal priority after a
  delay_microseconds_boost() raised it
 */
void Scheduler::boost_end(void)
{
#if APM_MAIN_PRIORITY_BOOST != APM_MAIN_PRIORITY
    // only the main thread can hold the boost, and only drop it if
    // it is currently applied
    if (!in_main_thread() || !_priority_boosted) {
        return;
    }
    _priority_boosted = false;
    hal_chibios_set_priority(APM_MAIN_PRIORITY);
#endif
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
a variant of delay_microseconds that boosts priority to
|
|
|
|
APM_MAIN_PRIORITY_BOOST for APM_MAIN_PRIORITY_BOOST_USEC
|
|
|
|
microseconds when the time completes. This significantly improves
|
2018-04-02 23:25:42 -03:00
|
|
|
the regularity of timing of the main loop
|
2018-01-05 02:19:51 -04:00
|
|
|
*/
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  variant of delay_microseconds() that boosts the main thread to
  APM_MAIN_PRIORITY_BOOST when the delay completes, improving the
  timing regularity of the main loop
 */
void Scheduler::delay_microseconds_boost(uint16_t usec)
{
    // arm the boost once per cycle, from the main thread only
    if (in_main_thread() && !_priority_boosted) {
        set_high_priority();
        _priority_boosted = true;
        // record the call so check_called_boost() can report it
        _called_boost = true;
    }
    // suspend the thread for the requested time
    delay_microseconds(usec);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
return true if delay_microseconds_boost() has been called since last check
|
|
|
|
*/
|
|
|
|
bool Scheduler::check_called_boost(void)
|
|
|
|
{
|
|
|
|
if (!_called_boost) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
_called_boost = false;
|
|
|
|
return true;
|
2018-01-05 02:19:51 -04:00
|
|
|
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  delay for the given number of milliseconds, invoking the registered
  delay callback (main thread only) once per millisecond while waiting
 */
void Scheduler::delay(uint16_t ms)
{
    const uint64_t start_us = AP_HAL::micros64();

    while ((AP_HAL::micros64() - start_us) / 1000 < ms) {
        delay_microseconds(1000);
        // only fire the delay callback for delays at least as long as
        // the registered minimum, and only from the main thread
        if (_min_delay_cb_ms <= ms && in_main_thread()) {
            // tag the persistent (watchdog) data so a lockup inside
            // the delay callback can be attributed to it
            const auto old_task = hal.util->persistent_data.scheduler_task;
            hal.util->persistent_data.scheduler_task = -4;
            call_delay_cb();
            hal.util->persistent_data.scheduler_task = old_task;
        }
    }
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  register a callback to be run from the timer thread at 1kHz.
  Re-registering an already-registered callback is a no-op.
 */
void Scheduler::register_timer_process(AP_HAL::MemberProc proc)
{
    chBSemWait(&_timer_semaphore);

    // ignore duplicates
    bool already_registered = false;
    for (uint8_t i = 0; i < _num_timer_procs; i++) {
        if (_timer_proc[i] == proc) {
            already_registered = true;
            break;
        }
    }

    if (!already_registered) {
        if (_num_timer_procs < CHIBIOS_SCHEDULER_MAX_TIMER_PROCS) {
            _timer_proc[_num_timer_procs] = proc;
            _num_timer_procs++;
        } else {
            // table full; callback is dropped
            DEV_PRINTF("Out of timer processes\n");
        }
    }

    chBSemSignal(&_timer_semaphore);
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  register a callback to be run from the IO thread at 1kHz.
  Re-registering an already-registered callback is a no-op.
 */
void Scheduler::register_io_process(AP_HAL::MemberProc proc)
{
    chBSemWait(&_io_semaphore);

    // ignore duplicates
    bool already_registered = false;
    for (uint8_t i = 0; i < _num_io_procs; i++) {
        if (_io_proc[i] == proc) {
            already_registered = true;
            break;
        }
    }

    if (!already_registered) {
        if (_num_io_procs < CHIBIOS_SCHEDULER_MAX_TIMER_PROCS) {
            _io_proc[_num_io_procs] = proc;
            _num_io_procs++;
        } else {
            // table full; callback is dropped
            DEV_PRINTF("Out of IO processes\n");
        }
    }

    chBSemSignal(&_io_semaphore);
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
// register a failsafe function called from _run_timers().
// NOTE(review): period_us is ignored here — the failsafe runs at the
// timer thread's own rate (once per _run_timers() pass); confirm no
// caller expects the requested period to be honoured
void Scheduler::register_timer_failsafe(AP_HAL::Proc failsafe, uint32_t period_us)
{
    _failsafe = failsafe;
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  reboot the board, optionally staying in the bootloader. The shutdown
  sequence is order-critical: outputs are made safe first, then the
  IOMCU, logging and filesystem are shut down, then the fast-reboot
  flag is set before interrupts are disabled and the MCU is reset.
 */
void Scheduler::reboot(bool hold_in_bootloader)
{
    // disarm motors to ensure they are off during a bootloader upload
    hal.rcout->force_safety_on();

#if HAL_WITH_IO_MCU
    // tell the IO co-processor to stop driving outputs
    if (AP_BoardConfig::io_enabled()) {
        iomcu.shutdown();
    }
#endif

#if HAL_LOGGING_ENABLED
    //stop logging
    if (AP_Logger::get_singleton()) {
        AP::logger().StopLogging();
    }

    // unmount filesystem, if active
    AP::FS().unmount();
#endif

#if AP_FASTBOOT_ENABLED
    // setup RTC for fast reboot
    set_fast_reboot(hold_in_bootloader?RTC_BOOT_HOLD:RTC_BOOT_FAST);
#endif

    // disable all interrupt sources
    port_disable();

    // reboot
    NVIC_SystemReset();
}
|
|
|
|
|
2018-05-13 19:47:14 -03:00
|
|
|
void Scheduler::_run_timers()
|
2018-01-05 02:19:51 -04:00
|
|
|
{
|
|
|
|
if (_in_timer_proc) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
_in_timer_proc = true;
|
|
|
|
|
2018-05-21 20:08:35 -03:00
|
|
|
int num_procs = 0;
|
|
|
|
chBSemWait(&_timer_semaphore);
|
|
|
|
num_procs = _num_timer_procs;
|
|
|
|
chBSemSignal(&_timer_semaphore);
|
2018-05-13 19:47:14 -03:00
|
|
|
// now call the timer based drivers
|
2018-05-21 20:08:35 -03:00
|
|
|
for (int i = 0; i < num_procs; i++) {
|
2018-05-13 19:47:14 -03:00
|
|
|
if (_timer_proc[i]) {
|
|
|
|
_timer_proc[i]();
|
2018-01-05 02:19:51 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// and the failsafe, if one is setup
|
|
|
|
if (_failsafe != nullptr) {
|
|
|
|
_failsafe();
|
|
|
|
}
|
|
|
|
|
2018-10-28 20:24:01 -03:00
|
|
|
#if HAL_USE_ADC == TRUE && !defined(HAL_DISABLE_ADC_DRIVER)
|
2018-01-05 02:19:51 -04:00
|
|
|
// process analog input
|
2018-01-13 00:02:05 -04:00
|
|
|
((AnalogIn *)hal.analogin)->_timer_tick();
|
2018-03-06 18:41:03 -04:00
|
|
|
#endif
|
2018-01-05 02:19:51 -04:00
|
|
|
|
|
|
|
_in_timer_proc = false;
|
|
|
|
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
void Scheduler::_timer_thread(void *arg)
|
2018-01-05 02:19:51 -04:00
|
|
|
{
|
2018-01-13 00:02:05 -04:00
|
|
|
Scheduler *sched = (Scheduler *)arg;
|
2020-04-28 03:32:29 -03:00
|
|
|
chRegSetThreadName("timer");
|
2018-01-05 02:19:51 -04:00
|
|
|
|
|
|
|
while (!sched->_hal_initialized) {
|
|
|
|
sched->delay_microseconds(1000);
|
|
|
|
}
|
|
|
|
while (true) {
|
|
|
|
sched->delay_microseconds(1000);
|
|
|
|
|
|
|
|
// run registered timers
|
2018-05-13 19:47:14 -03:00
|
|
|
sched->_run_timers();
|
2018-01-05 02:19:51 -04:00
|
|
|
|
2020-03-10 22:20:47 -03:00
|
|
|
if (sched->in_expected_delay()) {
|
|
|
|
sched->watchdog_pat();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-05 15:16:27 -04:00
|
|
|
// RC output thread entry point: waits for HAL init then hands control
// to the RCOutput driver's own thread loop (which does not return)
void Scheduler::_rcout_thread(void *arg)
{
#ifndef HAL_NO_RCOUT_THREAD
    Scheduler *sched = (Scheduler *)arg;
    chRegSetThreadName("rcout");

    while (!sched->_hal_initialized) {
        sched->delay_microseconds(1000);
    }
#if HAL_USE_PWM == TRUE
    // trampoline into the rcout thread
    ((RCOutput*)hal.rcout)->rcout_thread();
#endif
#endif
}
|
|
|
|
|
2020-03-10 22:20:47 -03:00
|
|
|
/*
|
|
|
|
return true if we are in a period of expected delay. This can be
|
|
|
|
used to suppress error messages
|
|
|
|
*/
|
|
|
|
bool Scheduler::in_expected_delay(void) const
|
|
|
|
{
|
2023-08-15 23:18:52 -03:00
|
|
|
#if AP_HAL_CHIBIOS_IN_EXPECTED_DELAY_WHEN_NOT_INITIALISED
|
2020-04-26 05:07:39 -03:00
|
|
|
if (!_initialized) {
|
|
|
|
// until setup() is complete we expect delays
|
|
|
|
return true;
|
|
|
|
}
|
2023-08-15 23:18:52 -03:00
|
|
|
#endif
|
2020-03-10 22:20:47 -03:00
|
|
|
if (expect_delay_start != 0) {
|
|
|
|
uint32_t now = AP_HAL::millis();
|
|
|
|
if (now - expect_delay_start <= expect_delay_length) {
|
|
|
|
return true;
|
2019-04-11 08:12:03 -03:00
|
|
|
}
|
2018-01-18 15:12:38 -04:00
|
|
|
}
|
2020-12-15 21:41:26 -04:00
|
|
|
#if !defined(HAL_NO_FLASH_SUPPORT) && !defined(HAL_BOOTLOADER_BUILD)
|
2020-12-05 19:29:31 -04:00
|
|
|
if (stm32_flash_recent_erase()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
#endif
|
2020-03-10 22:20:47 -03:00
|
|
|
return false;
|
2018-01-18 15:12:38 -04:00
|
|
|
}
|
2018-01-05 02:19:51 -04:00
|
|
|
|
2019-05-26 22:45:30 -03:00
|
|
|
#ifndef HAL_NO_MONITOR_THREAD
|
2019-05-09 08:15:58 -03:00
|
|
|
/*
  monitor thread entry point: detects main-loop lockups. Every 100ms
  it saves persistent data for the watchdog, checks memory guards,
  logs MON messages when the main loop stalls >=200ms, raises an
  internal error (and tries to break mutex deadlocks) at >=500ms, and
  deliberately hardfaults at >=1800ms to capture a crash dump before
  the hardware watchdog fires.
 */
void Scheduler::_monitor_thread(void *arg)
{
    Scheduler *sched = (Scheduler *)arg;
    chRegSetThreadName("monitor");

    // wait for full vehicle initialisation before monitoring
    while (!sched->_initialized) {
        sched->delay(100);
    }
    bool using_watchdog = AP_BoardConfig::watchdog_enabled();
#if HAL_LOGGING_ENABLED
    // counts 100ms ticks so WDOG messages are logged about once a second
    uint8_t log_wd_counter = 0;
#endif

    while (true) {
        sched->delay(100);
        if (using_watchdog) {
            // persist state so it survives a watchdog reset
            stm32_watchdog_save((uint32_t *)&hal.util->persistent_data, (sizeof(hal.util->persistent_data)+3)/4);
        }

        // if running memory guard then check all allocations
        malloc_check(nullptr);

        uint32_t now = AP_HAL::millis();
        // time since the main thread last pat the watchdog
        uint32_t loop_delay = now - sched->last_watchdog_pat_ms;
        if (loop_delay >= 200) {
            // the main loop has been stuck for at least
            // 200ms. Starting logging the main loop state
#if HAL_LOGGING_ENABLED
            const AP_HAL::Util::PersistentData &pd = hal.util->persistent_data;
            if (AP_Logger::get_singleton()) {
                const struct log_MON mon{
                    LOG_PACKET_HEADER_INIT(LOG_MON_MSG),
                    time_us              : AP_HAL::micros64(),
                    loop_delay           : loop_delay,
                    current_task         : pd.scheduler_task,
                    internal_error_mask  : pd.internal_errors,
                    internal_error_count : pd.internal_error_count,
                    internal_error_line  : pd.internal_error_last_line,
                    mavmsg               : pd.last_mavlink_msgid,
                    mavcmd               : pd.last_mavlink_cmd,
                    semline              : pd.semaphore_line,
                    spicnt               : pd.spi_count,
                    i2ccnt               : pd.i2c_count
                };
                AP::logger().WriteCriticalBlock(&mon, sizeof(mon));
            }
#endif
        }
        if (loop_delay >= 500 && !sched->in_expected_delay()) {
            // at 500ms we declare an internal error
            AP::internalerror().error(AP_InternalError::error_t::main_loop_stuck, hal.util->persistent_data.semaphore_line);
            /*
              if we are armed and get this condition then it is likely
              a lock ordering deadlock. If the main thread is waiting
              on a mutex then we try to force release the mutex from
              the thread that is holding it.
             */
            try_force_mutex();
        }

#if AP_CRASHDUMP_ENABLED
        if (loop_delay >= 1800 && using_watchdog) {
            // we are about to watchdog, better to trigger a hardfault
            // now and get a crash dump file
            void *ptr = (void*)0xE000FFFF;
            typedef void (*fptr)();
            fptr gptr = (fptr) (void *)ptr;
            gptr();
        }
#endif

#if HAL_LOGGING_ENABLED
        if (log_wd_counter++ == 10 && hal.util->was_watchdog_reset()) {
            log_wd_counter = 0;
            // log watchdog message once a second
            const AP_HAL::Util::PersistentData &pd = hal.util->last_persistent_data;
            struct log_WDOG wdog{
                LOG_PACKET_HEADER_INIT(LOG_WDOG_MSG),
                time_us                  : AP_HAL::micros64(),
                scheduler_task           : pd.scheduler_task,
                internal_errors          : pd.internal_errors,
                internal_error_count     : pd.internal_error_count,
                internal_error_last_line : pd.internal_error_last_line,
                last_mavlink_msgid       : pd.last_mavlink_msgid,
                last_mavlink_cmd         : pd.last_mavlink_cmd,
                semaphore_line           : pd.semaphore_line,
                fault_line               : pd.fault_line,
                fault_type               : pd.fault_type,
                fault_addr               : pd.fault_addr,
                fault_thd_prio           : pd.fault_thd_prio,
                fault_icsr               : pd.fault_icsr,
                fault_lr                 : pd.fault_lr
            };
            memcpy(wdog.thread_name4, pd.thread_name4, ARRAY_SIZE(wdog.thread_name4));

            AP::logger().WriteCriticalBlock(&wdog, sizeof(wdog));
        }
#endif // HAL_LOGGING_ENABLED

#ifndef IOMCU_FW
        // setup GPIO interrupt quotas
        hal.gpio->timer_tick();
#endif
    }
}
|
2019-05-26 22:45:30 -03:00
|
|
|
#endif // HAL_NO_MONITOR_THREAD
|
2019-05-09 08:15:58 -03:00
|
|
|
|
2018-01-18 15:12:38 -04:00
|
|
|
void Scheduler::_rcin_thread(void *arg)
|
|
|
|
{
|
|
|
|
Scheduler *sched = (Scheduler *)arg;
|
2020-04-28 03:32:29 -03:00
|
|
|
chRegSetThreadName("rcin");
|
2018-01-18 15:12:38 -04:00
|
|
|
while (!sched->_hal_initialized) {
|
|
|
|
sched->delay_microseconds(20000);
|
|
|
|
}
|
|
|
|
while (true) {
|
2019-08-27 04:47:32 -03:00
|
|
|
sched->delay_microseconds(1000);
|
2018-01-13 00:02:05 -04:00
|
|
|
((RCInput *)hal.rcin)->_timer_tick();
|
2018-01-05 02:19:51 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
void Scheduler::_run_io(void)
|
2018-01-05 02:19:51 -04:00
|
|
|
{
|
|
|
|
if (_in_io_proc) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
_in_io_proc = true;
|
|
|
|
|
2018-05-21 20:08:35 -03:00
|
|
|
int num_procs = 0;
|
|
|
|
chBSemWait(&_io_semaphore);
|
|
|
|
num_procs = _num_io_procs;
|
|
|
|
chBSemSignal(&_io_semaphore);
|
2018-05-13 19:47:14 -03:00
|
|
|
// now call the IO based drivers
|
2018-05-21 20:08:35 -03:00
|
|
|
for (int i = 0; i < num_procs; i++) {
|
2018-05-13 19:47:14 -03:00
|
|
|
if (_io_proc[i]) {
|
|
|
|
_io_proc[i]();
|
2018-01-05 02:19:51 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
_in_io_proc = false;
|
|
|
|
}
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  IO thread entry point: runs registered IO callbacks at 1kHz, retries
  SD card mounting every 3s while disarmed, and checks thread stack
  headroom once a second when stack checking is compiled in
 */
void Scheduler::_io_thread(void* arg)
{
    Scheduler *sched = (Scheduler *)arg;
    chRegSetThreadName("io");
    while (!sched->_hal_initialized) {
        sched->delay_microseconds(1000);
    }
#if HAL_LOGGING_ENABLED
    uint32_t last_sd_start_ms = AP_HAL::millis();
#endif
#if CH_DBG_ENABLE_STACK_CHECK == TRUE
    uint32_t last_stack_check_ms = 0;
#endif
    while (true) {
        sched->delay_microseconds(1000);

        // run registered IO processes
        sched->_run_io();

#if HAL_LOGGING_ENABLED || CH_DBG_ENABLE_STACK_CHECK == TRUE
        // shared timestamp for the periodic checks below
        uint32_t now = AP_HAL::millis();
#endif

#if HAL_LOGGING_ENABLED
        if (!hal.util->get_soft_armed()) {
            // if sdcard hasn't mounted then retry it every 3s in the IO
            // thread when disarmed
            if (now - last_sd_start_ms > 3000) {
                last_sd_start_ms = now;
                AP::FS().retry_mount();
            }
        }
#endif
#if CH_DBG_ENABLE_STACK_CHECK == TRUE
        // check stack headroom of all threads once a second
        if (now - last_stack_check_ms > 1000) {
            last_stack_check_ms = now;
            sched->check_stack_free();
        }
#endif
    }
}
|
|
|
|
|
2020-11-14 17:23:45 -04:00
|
|
|
#if defined(STM32H7)
|
|
|
|
/*
|
|
|
|
the H7 has 64k of ITCM memory at address zero. We reserve 1k of it
|
|
|
|
to prevent nullptr being valid. This function checks that memory is
|
|
|
|
always zero
|
|
|
|
*/
|
|
|
|
/*
  the H7 has ITCM memory at address zero; the first 1k is reserved so
  that nullptr dereferences stay detectable. This verifies the
  reserved region is still all-zero and raises a memory-guard error
  (code 1023) if it has been corrupted.
 */
void Scheduler::check_low_memory_is_zero()
{
    // deliberately scan near-null memory as 32-bit words
    const uint32_t *lowmem = nullptr;
    // we start at address 0x1 as reading address zero causes a fault
    for (uint16_t i=1; i<256; i++) {
        if (lowmem[i] != 0) {
            // re-use memory guard internal error
            AP_memory_guard_error(1023);
            break;
        }
    }
    // we can't do address 0, but can check next 3 bytes
    const uint8_t *addr0 = (const uint8_t *)0;
    for (uint8_t i=1; i<4; i++) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
        if (addr0[i] != 0) {
            AP_memory_guard_error(1023);
            break;
        }
#pragma GCC diagnostic pop
    }
}
|
|
|
|
#endif // STM32H7
|
|
|
|
|
2018-01-13 00:02:05 -04:00
|
|
|
/*
  storage thread entry point: flushes pending storage writes at 1kHz,
  and on STM32H7 periodically verifies that reserved low memory is
  still zero
 */
void Scheduler::_storage_thread(void* arg)
{
    Scheduler *sched = (Scheduler *)arg;
    chRegSetThreadName("storage");
    while (!sched->_hal_initialized) {
        sched->delay_microseconds(10000);
    }
#if defined STM32H7
    // counts 1ms ticks to pace the low-memory check
    uint16_t memcheck_counter=0;
#endif
    while (true) {
        sched->delay_microseconds(1000);

        // process any pending storage writes
        hal.storage->_timer_tick();

#if defined STM32H7
        if (memcheck_counter++ % 500 == 0) {
            // run check at 2Hz
            sched->check_low_memory_is_zero();
        }
#endif
    }
}
|
|
|
|
|
2020-12-23 07:26:59 -04:00
|
|
|
void Scheduler::set_system_initialized()
|
2018-01-05 02:19:51 -04:00
|
|
|
{
|
|
|
|
if (_initialized) {
|
2020-12-23 07:26:59 -04:00
|
|
|
AP_HAL::panic("PANIC: scheduler::set_system_initialized called"
|
2018-01-05 02:19:51 -04:00
|
|
|
"more than once");
|
|
|
|
}
|
|
|
|
_initialized = true;
|
|
|
|
}
|
|
|
|
|
2018-03-28 05:49:51 -03:00
|
|
|
/*
|
|
|
|
disable interrupts and return a context that can be used to
|
|
|
|
restore the interrupt state. This can be used to protect
|
|
|
|
critical regions
|
|
|
|
*/
|
|
|
|
/*
  disable interrupts and return a context that can be used to
  restore the interrupt state. This can be used to protect
  critical regions; pass the return value to restore_interrupts().
 */
void *Scheduler::disable_interrupts_save(void)
{
    // the ChibiOS status word is smuggled through a void* for the
    // cross-platform AP_HAL interface
    return (void *)(uintptr_t)chSysGetStatusAndLockX();
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
restore interrupt state from disable_interrupts_save()
|
|
|
|
*/
|
|
|
|
/*
  restore interrupt state from disable_interrupts_save()
 */
void Scheduler::restore_interrupts(void *state)
{
    // unpack the status word saved by disable_interrupts_save()
    chSysRestoreStatusX((syssts_t)(uintptr_t)state);
}
|
|
|
|
|
2018-07-06 20:59:10 -03:00
|
|
|
/*
|
|
|
|
trampoline for thread create
|
|
|
|
*/
|
|
|
|
/*
  trampoline for thread create: invokes the heap-allocated MemberProc
  copied by thread_create(), then frees it when the thread function
  returns
 */
void Scheduler::thread_create_trampoline(void *ctx)
{
    AP_HAL::MemberProc *t = (AP_HAL::MemberProc *)ctx;
    (*t)();
    // ownership of the copy was transferred to this thread
    free(t);
}
|
|
|
|
|
2021-03-17 23:10:29 -03:00
|
|
|
// calculates an integer to be used as the priority for a newly-created thread
|
|
|
|
/*
  calculate the ChibiOS priority for a newly-created thread from a
  priority_base plus a signed offset, clamped to [LOWPRIO, HIGHPRIO].
  Unknown bases fall back to APM_IO_PRIORITY.
 */
uint8_t Scheduler::calculate_thread_priority(priority_base base, int8_t priority) const
{
    // mapping from the portable priority bases to ChibiOS priorities
    static const struct {
        priority_base base;
        uint8_t p;
    } priority_map[] = {
        { PRIORITY_BOOST, APM_MAIN_PRIORITY_BOOST},
        { PRIORITY_MAIN, APM_MAIN_PRIORITY},
        { PRIORITY_SPI, APM_SPI_PRIORITY},
        { PRIORITY_I2C, APM_I2C_PRIORITY},
        { PRIORITY_CAN, APM_CAN_PRIORITY},
        { PRIORITY_TIMER, APM_TIMER_PRIORITY},
        { PRIORITY_RCOUT, APM_RCOUT_PRIORITY},
        { PRIORITY_LED, APM_LED_PRIORITY},
        { PRIORITY_RCIN, APM_RCIN_PRIORITY},
        { PRIORITY_IO, APM_IO_PRIORITY},
        { PRIORITY_UART, APM_UART_PRIORITY},
        { PRIORITY_STORAGE, APM_STORAGE_PRIORITY},
        { PRIORITY_SCRIPTING, APM_SCRIPTING_PRIORITY},
        { PRIORITY_NET, APM_NET_PRIORITY},
    };
    // default when the base is not found in the table
    uint8_t thread_priority = APM_IO_PRIORITY;
    for (const auto &entry : priority_map) {
        if (entry.base == base) {
            thread_priority = constrain_int16(entry.p + priority, LOWPRIO, HIGHPRIO);
            break;
        }
    }
    return thread_priority;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
create a new thread
|
|
|
|
*/
|
|
|
|
/*
  create a new thread running the given MemberProc at the requested
  priority. Returns false on allocation or thread-creation failure.
 */
bool Scheduler::thread_create(AP_HAL::MemberProc proc, const char *name, uint32_t stack_size, priority_base base, int8_t priority)
{
    // take a copy of the MemberProc on the heap; it is freed by
    // thread_create_trampoline() after the thread exits
    AP_HAL::MemberProc *tproc = (AP_HAL::MemberProc *)malloc(sizeof(proc));
    if (tproc == nullptr) {
        return false;
    }
    *tproc = proc;

    const uint8_t thread_priority = calculate_thread_priority(base, priority);

    thread_t *thread_ctx = thread_create_alloc(THD_WORKING_AREA_SIZE(stack_size),
                                               name,
                                               thread_priority,
                                               thread_create_trampoline,
                                               tproc);
    if (thread_ctx == nullptr) {
        // thread never started, so the trampoline won't free the copy
        free(tproc);
        return false;
    }
    return true;
}
|
|
|
|
|
2019-04-11 08:12:03 -03:00
|
|
|
/*
|
|
|
|
inform the scheduler that we are calling an operation from the
|
|
|
|
main thread that may take an extended amount of time. This can
|
|
|
|
be used to prevent watchdog reset during expected long delays
|
|
|
|
A value of zero cancels the previous expected delay
|
|
|
|
*/
|
2020-04-12 08:04:26 -03:00
|
|
|
/*
  inform the scheduler that we are calling an operation from the
  main thread that may take an extended amount of time. This can
  be used to prevent watchdog reset during expected long delays.
  A value of zero cancels the previous expected delay; calls nest,
  and a new delay extends (never shortens) an active one.
 */
void Scheduler::_expect_delay_ms(uint32_t ms)
{
    if (!in_main_thread()) {
        // only for main thread
        return;
    }

    // pat once immediately
    watchdog_pat();

    WITH_SEMAPHORE(expect_delay_sem);

    if (ms == 0) {
        // cancellation: pop one nesting level; only clear the window
        // when the outermost level is cancelled
        if (expect_delay_nesting > 0) {
            expect_delay_nesting--;
        }
        if (expect_delay_nesting == 0) {
            expect_delay_start = 0;
        }
    } else {
        uint32_t now = AP_HAL::millis();
        if (expect_delay_start != 0) {
            // we already have a delay running, possibly extend it
            uint32_t done = now - expect_delay_start;
            if (expect_delay_length > done) {
                ms = MAX(ms, expect_delay_length - done);
            }
        }
        expect_delay_start = now;
        expect_delay_length = ms;
        expect_delay_nesting++;

        // also put our priority below timer thread if we are boosted
        boost_end();
    }
}
|
|
|
|
|
2020-04-12 08:04:26 -03:00
|
|
|
/*
|
|
|
|
this is _expect_delay_ms() with check that we are in the main thread
|
|
|
|
*/
|
|
|
|
void Scheduler::expect_delay_ms(uint32_t ms)
|
|
|
|
{
|
|
|
|
if (!in_main_thread()) {
|
|
|
|
// only for main thread
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
_expect_delay_ms(ms);
|
|
|
|
}
|
|
|
|
|
2019-05-09 08:15:58 -03:00
|
|
|
// pat the watchdog
|
|
|
|
void Scheduler::watchdog_pat(void)
|
|
|
|
{
|
|
|
|
stm32_watchdog_pat();
|
|
|
|
last_watchdog_pat_ms = AP_HAL::millis();
|
2023-05-11 04:06:28 -03:00
|
|
|
#if defined(HAL_GPIO_PIN_EXT_WDOG)
|
|
|
|
ext_watchdog_pat(last_watchdog_pat_ms);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(HAL_GPIO_PIN_EXT_WDOG)
// toggle the external watchdog gpio pin
void Scheduler::ext_watchdog_pat(uint32_t now_ms)
{
    // rate-limit the toggle to once per EXT_WDOG_INTERVAL_MS
    const uint32_t since_last_ms = now_ms - last_ext_watchdog_ms;
    if (since_last_ms >= EXT_WDOG_INTERVAL_MS) {
        palToggleLine(HAL_GPIO_PIN_EXT_WDOG);
        last_ext_watchdog_ms = now_ms;
    }
}
#endif
|
2019-05-09 08:15:58 -03:00
|
|
|
|
2020-11-29 15:53:49 -04:00
|
|
|
#if CH_DBG_ENABLE_STACK_CHECK == TRUE
|
|
|
|
/*
|
|
|
|
check we have enough stack free on all threads and the ISR stack
|
|
|
|
*/
|
|
|
|
void Scheduler::check_stack_free(void)
|
|
|
|
{
|
2020-11-29 18:10:21 -04:00
|
|
|
// we raise an internal error stack_overflow when the available
|
|
|
|
// stack on any thread or the ISR stack drops below this
|
|
|
|
// threshold. This means we get an overflow error when we haven't
|
|
|
|
// yet completely run out of stack. This gives us a good
|
|
|
|
// pre-warning when we are getting too close
|
|
|
|
#if defined(STM32F1)
|
2020-11-29 15:53:49 -04:00
|
|
|
const uint32_t min_stack = 32;
|
2020-11-29 18:10:21 -04:00
|
|
|
#else
|
|
|
|
const uint32_t min_stack = 64;
|
|
|
|
#endif
|
2020-11-29 15:53:49 -04:00
|
|
|
|
2020-11-29 18:10:21 -04:00
|
|
|
if (stack_free(&__main_stack_base__) < min_stack) {
|
|
|
|
// use "line number" of 0xFFFF for ISR stack low
|
2023-08-15 01:14:02 -03:00
|
|
|
#if AP_INTERNALERROR_ENABLED
|
2020-11-29 18:10:21 -04:00
|
|
|
AP::internalerror().error(AP_InternalError::error_t::stack_overflow, 0xFFFF);
|
2023-08-15 01:14:02 -03:00
|
|
|
#endif
|
2020-11-29 15:53:49 -04:00
|
|
|
}
|
|
|
|
|
2020-11-29 18:10:21 -04:00
|
|
|
for (thread_t *tp = chRegFirstThread(); tp; tp = chRegNextThread(tp)) {
|
2020-11-29 15:53:49 -04:00
|
|
|
if (stack_free(tp->wabase) < min_stack) {
|
2020-11-29 18:10:21 -04:00
|
|
|
// use task priority for line number. This allows us to
|
|
|
|
// identify the task fairly reliably
|
2023-08-15 01:14:02 -03:00
|
|
|
#if AP_INTERNALERROR_ENABLED
|
2021-02-18 17:52:58 -04:00
|
|
|
AP::internalerror().error(AP_InternalError::error_t::stack_overflow, tp->realprio);
|
2023-08-15 01:14:02 -03:00
|
|
|
#endif
|
2020-11-29 15:53:49 -04:00
|
|
|
}
|
2020-11-29 18:10:21 -04:00
|
|
|
}
|
2020-11-29 15:53:49 -04:00
|
|
|
}
|
|
|
|
#endif // CH_DBG_ENABLE_STACK_CHECK == TRUE
|
|
|
|
|
2018-03-06 18:41:03 -04:00
|
|
|
#endif // CH_CFG_USE_DYNAMIC
|
2022-08-18 03:45:30 -03:00
|
|
|
|
2023-06-15 20:54:23 -03:00
|
|
|
/*
  try to avoid watchdog during a locking deadlock by force releasing a
  mutex that is blocking the main thread
 */
void Scheduler::try_force_mutex(void)
{
#if HAL_LOGGING_ENABLED
    // enter the kernel critical section so thread/mutex state cannot
    // change while we inspect and modify it; every exit path below
    // must call chSysUnlock()
    chSysLock();
    thread_t *main_thread = get_main_thread();

    // only act if the main thread is currently blocked waiting on a mutex
    if (main_thread == nullptr || main_thread->state != CH_STATE_WTMTX) {
        chSysUnlock();
        return;
    }

    // the mutex the main thread is waiting on; bail out if it has no
    // current owner (nothing to force-release)
    mutex_t *wtmtx = main_thread->u.wtmtxp;
    if (wtmtx == nullptr || wtmtx->owner == nullptr) {
        chSysUnlock();
        return;
    }
    // capture diagnostic info before releasing: owner thread name
    // (16 chars + NUL) and the last recorded semaphore take line
    char thdname[17] {};
    uint16_t sem_line = hal.util->persistent_data.semaphore_line;
    strncpy(thdname, wtmtx->owner->name, sizeof(thdname)-1);

    // we will force release the lock
    // (S-class call: must be made inside the chSysLock critical section)
    chMtxForceReleaseS(wtmtx);
    chSysUnlock();

    // log a DLCK message with information on the deadlock we have avoided
    // (done after chSysUnlock, as logging may itself take locks)
    AP::logger().WriteCritical("DLCK", "TimeUS,SemLine,ThdName,MtxP", "QHNI",
                               AP_HAL::micros64(),
                               sem_line,
                               thdname,
                               unsigned(wtmtx));
    GCS_SEND_TEXT(MAV_SEVERITY_CRITICAL, "CRITICAL Deadlock %u %s %p", sem_line, thdname, wtmtx);
#endif
}
|
|
|
|
|
2022-08-18 03:45:30 -03:00
|
|
|
#endif // HAL_SCHEDULER_ENABLED
|