/**
 ******************************************************************************
 *
 * @file       pios_callbackscheduler.c
 * @author     The OpenPilot Team, http://www.openpilot.org Copyright (C) 2013.
 * @brief      Scheduler to run callback functions from a shared context with given priorities.
 * @see        The GNU Public License (GPL) Version 3
 *
 *****************************************************************************/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <pios.h>

#ifdef PIOS_INCLUDE_CALLBACKSCHEDULER

#include <utlist.h>
#include <uavobjectmanager.h>
#include <taskinfo.h>

// Private constants
#define STACK_SAFETYCOUNT 16
#define STACK_SIZE        (300 + STACK_SAFETYSIZE)
#define STACK_SAFETYSIZE  8
#define MAX_SLEEP         1000

// Private types
/**
 * task information
 */
struct DelayedCallbackTaskStruct {
    DelayedCallbackInfo *callbackQueue[CALLBACK_PRIORITY_LOW + 1];
    DelayedCallbackInfo *queueCursor[CALLBACK_PRIORITY_LOW + 1];
    xTaskHandle callbackSchedulerTaskHandle;
    char        name[3];
    uint32_t    stackSize;
    DelayedCallbackPriorityTask priorityTask;
    xSemaphoreHandle signal;
    struct DelayedCallbackTaskStruct *next;
};

/**
 * callback information
 */
struct DelayedCallbackInfoStruct {
    DelayedCallback cb;
    int16_t  callbackID;
    bool     volatile waiting;
    uint32_t volatile scheduletime;
    uint32_t stackSize;
    int32_t  stackFree;
    int32_t  stackNotFree;
    uint16_t stackSafetyCount;
    uint16_t currentSafetyCount;
    uint32_t runCount;
    struct DelayedCallbackTaskStruct *task;
    struct DelayedCallbackInfoStruct *next;
};


// Private variables
static struct DelayedCallbackTaskStruct *schedulerTasks;
static xSemaphoreHandle mutex;
static bool schedulerStarted;

// Private functions
static void CallbackSchedulerTask(void *task);
static int32_t runNextCallback(struct DelayedCallbackTaskStruct *task, DelayedCallbackPriority priority);

/**
 * Initialize the scheduler.
 * Must be called before any other scheduler functions are called.
 * \return Success (0), failure (-1)
 */
int32_t PIOS_CALLBACKSCHEDULER_Initialize()
{
    // Initialize variables
    schedulerTasks   = NULL;
    schedulerStarted = false;

    // Create mutex
    mutex = xSemaphoreCreateRecursiveMutex();
    if (mutex == NULL) {
        return -1;
    }

    // Done
    return 0;
}

/**
 * Start all scheduler tasks.
 * Will instantiate all scheduler tasks registered so far. Although new
 * callbacks CAN be registered beyond that point, any scheduler tasks created
 * after this call will be started at the moment of their instantiation. It is
 * not possible to increase the stack requirements of a scheduler task after
 * this function has been run. No callbacks will be run before this function
 * is called, although they can be marked for later execution by calling the
 * dispatch function.
 * \return Success (0), failure (-1)
 */
int32_t PIOS_CALLBACKSCHEDULER_Start()
{
    xSemaphoreTakeRecursive(mutex, portMAX_DELAY);

    // only call once
    PIOS_Assert(schedulerStarted == false);

    // start tasks
    struct DelayedCallbackTaskStruct *cursor = NULL;
    int t = 0;
    LL_FOREACH(schedulerTasks, cursor) {
        xTaskCreate(
            CallbackSchedulerTask,
            cursor->name,
            1 + (cursor->stackSize / 4),
            cursor,
            cursor->priorityTask,
            &cursor->callbackSchedulerTaskHandle
            );
        if (TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t <= TASKINFO_RUNNING_CALLBACKSCHEDULER3) {
            PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t, cursor->callbackSchedulerTaskHandle);
        }
        t++;
    }

    schedulerStarted = true;

    xSemaphoreGiveRecursive(mutex);

    return 0;
}
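
/*
 * Typical start-up order (illustrative sketch of the intended call sequence,
 * based on the documentation above; not code from this file):
 *
 *   PIOS_CALLBACKSCHEDULER_Initialize();  // create scheduler state and mutex
 *   // ... modules register callbacks with PIOS_CALLBACKSCHEDULER_Create() ...
 *   PIOS_CALLBACKSCHEDULER_Start();       // spawn one scheduler task per distinct task priority
 *   // from here on, callbacks run whenever they are dispatched or scheduled
 */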

/**
 * Schedule dispatching a callback at some point in the future. The function returns immediately.
 * \param[in] *cbinfo the callback handle
 * \param[in] milliseconds How far in the future to dispatch the callback
 * \param[in] updatemode What to do if the callback is already scheduled but not dispatched yet.
 *            The options are:
 *            CALLBACK_UPDATEMODE_NONE:     An existing schedule will not be touched; the call has no effect at all if there is an existing schedule.
 *            CALLBACK_UPDATEMODE_SOONER:   The callback will be rescheduled only if the new schedule triggers before the original one would have triggered.
 *            CALLBACK_UPDATEMODE_LATER:    The callback will be rescheduled only if the new schedule triggers after the original one would have triggered.
 *            CALLBACK_UPDATEMODE_OVERRIDE: The callback will be rescheduled in any case, effectively overriding any previous schedule. (sooner+later=override)
 * \return 0: not scheduled, previous schedule takes precedence, 1: new schedule, 2: previous schedule overridden
 */
int32_t PIOS_CALLBACKSCHEDULER_Schedule(
    DelayedCallbackInfo *cbinfo,
    int32_t milliseconds,
    DelayedCallbackUpdateMode updatemode)
{
    int32_t result = 0;

    PIOS_Assert(cbinfo);

    if (milliseconds <= 0) {
        milliseconds = 0; // we cannot and will not schedule in the past, since that breaks the uint32_t wraparound handling
    }

    xSemaphoreTakeRecursive(mutex, portMAX_DELAY);

    uint32_t new = xTaskGetTickCount() + (milliseconds / portTICK_RATE_MS);
    if (!new) {
        new = 1; // zero has a special meaning, schedule at time 1 instead
    }

    int32_t diff = new - cbinfo->scheduletime;
    if ((!cbinfo->scheduletime)
        || ((updatemode & CALLBACK_UPDATEMODE_SOONER) && diff < 0)
        || ((updatemode & CALLBACK_UPDATEMODE_LATER) && diff > 0)
        ) {
        // the scheduletime may be updated
        if (!cbinfo->scheduletime) {
            result = 1;
        } else {
            result = 2;
        }
        cbinfo->scheduletime = new;

        // scheduler needs to be notified to adapt sleep times
        xSemaphoreGive(cbinfo->task->signal);
    }

    xSemaphoreGiveRecursive(mutex);

    return result;
}
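
/*
 * Example (sketch only; "cbHandle" is assumed to have been obtained from
 * PIOS_CALLBACKSCHEDULER_Create()):
 *
 *   // make sure the callback runs no later than 50 ms from now,
 *   // keeping an earlier schedule if one already exists
 *   PIOS_CALLBACKSCHEDULER_Schedule(cbHandle, 50, CALLBACK_UPDATEMODE_SOONER);
 */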

/**
 * Dispatch an event by invoking the supplied callback. The function
 * returns immediately, the callback is invoked from the scheduler task.
 * \param[in] cbinfo the callback handle
 * \return Success (pdTRUE), failure (pdFALSE)
 */
int32_t PIOS_CALLBACKSCHEDULER_Dispatch(DelayedCallbackInfo *cbinfo)
{
    PIOS_Assert(cbinfo);

    // no semaphore needed for the callback
    cbinfo->waiting = true;
    // but the scheduler as a whole needs to be notified
    return xSemaphoreGive(cbinfo->task->signal);
}

/**
 * Dispatch an event from an ISR by invoking the supplied callback. The function
 * returns immediately, the callback is invoked from the scheduler task.
 * \param[in] cbinfo the callback handle
 * \param[in] pxHigherPriorityTaskWoken
 *            xSemaphoreGiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE if
 *            giving the semaphore caused a task to unblock, and the unblocked task has a
 *            priority higher than the currently running task. If xSemaphoreGiveFromISR()
 *            sets this value to pdTRUE then a context switch should be requested before
 *            the interrupt is exited.
 *            From the FreeRTOS documentation: context switching from an ISR uses port specific syntax.
 *            Check the demo task for your port to find the syntax required.
 * \return Success (pdTRUE), failure (pdFALSE)
 */
int32_t PIOS_CALLBACKSCHEDULER_DispatchFromISR(DelayedCallbackInfo *cbinfo, long *pxHigherPriorityTaskWoken)
{
    PIOS_Assert(cbinfo);

    // no semaphore needed for the callback
    cbinfo->waiting = true;
    // but the scheduler as a whole needs to be notified
    return xSemaphoreGiveFromISR(cbinfo->task->signal, pxHigherPriorityTaskWoken);
}
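
/*
 * Example ISR usage (sketch only; the yield macro is port specific, as noted
 * above - portYIELD_FROM_ISR() is assumed here and may differ on your port,
 * and the handler name is illustrative):
 *
 *   void SOME_IRQHandler(void)
 *   {
 *       long wokenTask = pdFALSE;
 *
 *       PIOS_CALLBACKSCHEDULER_DispatchFromISR(cbHandle, &wokenTask);
 *       portYIELD_FROM_ISR(wokenTask);
 *   }
 */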

/**
 * Register a new callback to be called by a delayed callback scheduler task.
 * If a scheduler task with the specified task priority does not exist yet, it
 * will be created.
 * \param[in] cb The callback to be invoked
 * \param[in] priority Priority of the callback compared to other callbacks scheduled by the same delayed callback scheduler task.
 * \param[in] priorityTask Task priority of the scheduler task. One scheduler task will be spawned for each distinct value specified,
 *            further callbacks created with the same priorityTask will all be handled by the same delayed callback scheduler task
 *            and scheduled according to their individual callback priorities.
 * \param[in] callbackID Identifier of the callback, reported to the iterator callback by PIOS_CALLBACKSCHEDULER_ForEachCallback().
 * \param[in] stacksize The stack requirements of the callback when called by the scheduler.
 * \return CallbackInfo Pointer on success, NULL if failed.
 */
DelayedCallbackInfo *PIOS_CALLBACKSCHEDULER_Create(
    DelayedCallback cb,
    DelayedCallbackPriority priority,
    DelayedCallbackPriorityTask priorityTask,
    int16_t callbackID,
    uint32_t stacksize)
{
    xSemaphoreTakeRecursive(mutex, portMAX_DELAY);

    // add the callback scheduler's own stack requirements
    stacksize += STACK_SIZE;

    // find appropriate scheduler task matching priorityTask
    struct DelayedCallbackTaskStruct *task = NULL;
    int t = 0;
    LL_FOREACH(schedulerTasks, task) {
        if (task->priorityTask == priorityTask) {
            break; // found
        }
        t++;
    }
    // if given priorityTask does not exist, create it
    if (!task) {
        // allocate memory if possible
        task = (struct DelayedCallbackTaskStruct *)pios_malloc(sizeof(struct DelayedCallbackTaskStruct));
        if (!task) {
            xSemaphoreGiveRecursive(mutex);
            return NULL;
        }

        // initialize structure
        for (DelayedCallbackPriority p = 0; p <= CALLBACK_PRIORITY_LOW; p++) {
            task->callbackQueue[p] = NULL;
            task->queueCursor[p]   = NULL;
        }
        task->name[0]      = 'C';
        task->name[1]      = 'a' + t;
        task->name[2]      = 0;
        task->stackSize    = stacksize;
        task->priorityTask = priorityTask;
        task->next         = NULL;

        // create the signaling semaphore
        vSemaphoreCreateBinary(task->signal);
        if (!task->signal) {
            xSemaphoreGiveRecursive(mutex);
            return NULL;
        }

        // add to list of scheduler tasks
        LL_APPEND(schedulerTasks, task);

        // Previously registered tasks are spawned when PIOS_CALLBACKSCHEDULER_Start() is called.
        // Tasks registered afterwards need to spawn upon creation.
        if (schedulerStarted) {
            xTaskCreate(
                CallbackSchedulerTask,
                task->name,
                1 + (task->stackSize / 4),
                task,
                task->priorityTask,
                &task->callbackSchedulerTaskHandle
                );
            if (TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t <= TASKINFO_RUNNING_CALLBACKSCHEDULER3) {
                PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t, task->callbackSchedulerTaskHandle);
            }
        }
    }

    if (!schedulerStarted && stacksize > task->stackSize) {
        task->stackSize = stacksize; // prior to task initialisation we can still adapt to the maximum needed stack
    }

    if (stacksize > task->stackSize) {
        xSemaphoreGiveRecursive(mutex);
        return NULL; // error - the already started scheduler task does not have enough stack
    }

    // initialize callback scheduling info
    DelayedCallbackInfo *info = (DelayedCallbackInfo *)pios_malloc(sizeof(DelayedCallbackInfo));
    if (!info) {
        xSemaphoreGiveRecursive(mutex);
        return NULL; // error - not enough memory
    }
    info->next          = NULL;
    info->waiting       = false;
    info->scheduletime  = 0;
    info->task          = task;
    info->cb            = cb;
    info->callbackID    = callbackID;
    info->runCount      = 0;
    info->stackSize     = stacksize - STACK_SIZE;
    info->stackNotFree  = info->stackSize;
    info->stackFree     = 0;
    info->stackSafetyCount   = STACK_SAFETYCOUNT;
    info->currentSafetyCount = 0;

    // add to scheduling queue
    LL_APPEND(task->callbackQueue[priority], info);

    xSemaphoreGiveRecursive(mutex);

    return info;
}
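
/*
 * Registration example (sketch only; the callback, task priority, ID and stack
 * constant shown here are illustrative - actual DelayedCallbackPriorityTask
 * values come from the scheduler header, not from this file):
 *
 *   static DelayedCallbackInfo *cbHandle;
 *
 *   static void myCallback(void)
 *   {
 *       // do periodic or event-driven work here
 *   }
 *
 *   // during module initialisation:
 *   cbHandle = PIOS_CALLBACKSCHEDULER_Create(
 *       &myCallback,
 *       CALLBACK_PRIORITY_LOW,   // priority within the scheduler task
 *       someTaskPriority,        // DelayedCallbackPriorityTask selecting the scheduler task (illustrative)
 *       -1,                      // callbackID (illustrative)
 *       MY_CALLBACK_STACK_SIZE); // stack needed by myCallback (illustrative constant)
 *
 *   // later, whenever the work needs to run:
 *   PIOS_CALLBACKSCHEDULER_Dispatch(cbHandle);
 */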

/**
 * Iterator. Iterates over all callbacks of all scheduler tasks and retrieves information about each.
 *
 * @param[in] callback Callback function to receive the data - will be called in the same task context as the caller.
 * @param context Context information optionally provided to the callback.
 */
void PIOS_CALLBACKSCHEDULER_ForEachCallback(CallbackSchedulerCallbackInfoCallback callback, void *context)
{
    if (!callback) {
        return;
    }

    struct pios_callback_info info;

    struct DelayedCallbackTaskStruct *task = NULL;
    LL_FOREACH(schedulerTasks, task) {
        int prio;

        for (prio = 0; prio < (CALLBACK_PRIORITY_LOW + 1); prio++) {
            struct DelayedCallbackInfoStruct *cbinfo;
            LL_FOREACH(task->callbackQueue[prio], cbinfo) {
                xSemaphoreTakeRecursive(mutex, portMAX_DELAY);
                info.is_running         = true;
                info.stack_remaining    = cbinfo->stackNotFree;
                info.running_time_count = cbinfo->runCount;
                xSemaphoreGiveRecursive(mutex);
                callback(cbinfo->callbackID, &info, context);
            }
        }
    }
}
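
/*
 * Iterator usage example (sketch only; the receiver function below is
 * illustrative and its exact signature is defined by
 * CallbackSchedulerCallbackInfoCallback in the scheduler header):
 *
 *   static void printCallbackInfo(int16_t callbackID, struct pios_callback_info *info, void *context)
 *   {
 *       // e.g. log callbackID, info->stack_remaining and info->running_time_count
 *   }
 *
 *   PIOS_CALLBACKSCHEDULER_ForEachCallback(&printCallbackInfo, NULL);
 */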

/**
 * Stack magic: find out how much stack is being used without affecting performance
 */
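/*
 * How the watermarking below works: markStack() writes a small '#' pattern
 * (STACK_SAFETYSIZE bytes) at the known end of the callback's stack region and
 * at a halfway point between the currently known "free" and "not free"
 * boundaries. After the callback has run, checkStack() inspects both marks:
 * a tainted end-of-stack mark means a stack overflow (stackNotFree is set to
 * -1 and all further checks are disabled), while a tainted halfway mark moves
 * the "not free" boundary down to that point. If the halfway mark stays intact
 * for stackSafetyCount consecutive runs, the "free" boundary is moved up
 * instead. The result is a binary search for the real stack usage, spread over
 * many callback invocations so that each individual run stays cheap.
 */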
static void markStack(DelayedCallbackInfo *current)
{
    register int8_t t;
    register int32_t halfWayMark;
    volatile unsigned char *marker;

    if (current->stackNotFree < 0) {
        return;
    }

    // end of stack watermark
    marker = (unsigned char *)(((size_t)&marker) - (size_t)current->stackSize);
    for (t = -STACK_SAFETYSIZE; t < 0; t++) {
        *(marker + t) = '#';
    }
    // shifted watermarks
    halfWayMark = current->stackFree + (current->stackNotFree - current->stackFree) / 2;
    marker      = (unsigned char *)((size_t)marker + halfWayMark);
    for (t = -STACK_SAFETYSIZE; t < 0; t++) {
        *(marker + t) = '#';
    }
}

static void checkStack(DelayedCallbackInfo *current)
{
    register int8_t t;
    register int32_t halfWayMark;
    volatile unsigned char *marker;

    if (current->stackNotFree < 0) {
        return;
    }

    // end of stack watermark
    marker = (unsigned char *)(((size_t)&marker) - (size_t)current->stackSize);
    for (t = -STACK_SAFETYSIZE; t < 0; t++) {
        if (*(marker + t) != '#') {
            current->stackNotFree = -1; // stack overflow, disable all further checks
            return;
        }
    }
    // shifted watermarks
    halfWayMark = current->stackFree + (current->stackNotFree - current->stackFree) / 2;
    marker      = (unsigned char *)((size_t)marker + halfWayMark);
    for (t = -STACK_SAFETYSIZE; t < 0; t++) {
        if (*(marker + t) != '#') {
            current->stackNotFree = halfWayMark; // tainted mark, this place is definitely used stack
            current->currentSafetyCount = 0;
            if (current->stackNotFree <= current->stackFree) {
                current->stackFree = 0; // if it was supposed to be free, restart search between here and bottom of stack
            }
            return;
        }
    }

    if (current->currentSafetyCount < 0xffff) {
        current->currentSafetyCount++; // mark has not been tainted, increase safety counter
    }
    if (current->currentSafetyCount >= current->stackSafetyCount) { // if the safety counter is above the limit, then
        if (halfWayMark == current->stackFree) { // check if search already converged, if so increase the limit to find very rare stack usage incidents
            current->stackSafetyCount = current->currentSafetyCount;
        } else {
            current->stackFree = halfWayMark; // otherwise just mark this position as free stack to narrow search
            current->currentSafetyCount = 0;
        }
    }
}

/**
 * Scheduler subtask
 * \param[in] task The scheduler task in question
 * \param[in] priority The scheduling priority of the callback to search for
 * \return wait time until the next scheduled callback is due - 0 if a callback has just been executed
 */
static int32_t runNextCallback(struct DelayedCallbackTaskStruct *task, DelayedCallbackPriority priority)
{
    int32_t result = MAX_SLEEP;
    int32_t diff   = 0;

    // no such queue
    if (priority > CALLBACK_PRIORITY_LOW) {
        return result;
    }

    // queue is empty, search a lower priority queue
    if (task->callbackQueue[priority] == NULL) {
        return runNextCallback(task, priority + 1);
    }

    DelayedCallbackInfo *current = task->queueCursor[priority];
    DelayedCallbackInfo *next;
    do {
        if (current == NULL) {
            next = task->callbackQueue[priority]; // loop around the end of the list
            // also attempt to run a callback that has lower priority
            // every time the queue is completely traversed
            diff = runNextCallback(task, priority + 1);
            if (!diff) {
                task->queueCursor[priority] = next; // the recursive call has executed a callback
                return 0;
            }
            if (diff < result) {
                result = diff; // adjust sleep time
            }
        } else {
            next = current->next;
            xSemaphoreTakeRecursive(mutex, portMAX_DELAY); // access to scheduletime should be mutex protected
            if (current->scheduletime) {
                diff = current->scheduletime - xTaskGetTickCount();
                if (diff <= 0) {
                    current->waiting = true;
                } else if (diff < result) {
                    result = diff; // adjust sleep time
                }
            }
            if (current->waiting) {
                task->queueCursor[priority] = next;
                current->scheduletime = 0; // any schedules are reset
                current->waiting      = false; // the flag is reset just before execution
                xSemaphoreGiveRecursive(mutex);

                /* callback gets invoked here - check stack sizes */
                markStack(current);

                current->cb(); // call the callback

                checkStack(current);

                current->runCount++;

                return 0;
            }
            xSemaphoreGiveRecursive(mutex);
        }
        current = next;
    } while (current != task->queueCursor[priority]);
    // once the list has been traversed entirely without finding any callback to execute, abort (nothing to do)
    return result;
}

/**
 * Scheduler task, responsible for invoking callbacks.
 * \param[in] task The scheduling task being run
 */
static void CallbackSchedulerTask(void *task)
{
    uint32_t delay = 0;

    while (1) {
        delay = runNextCallback((struct DelayedCallbackTaskStruct *)task, CALLBACK_PRIORITY_CRITICAL);
        if (delay) {
            // nothing to do but sleep
            xSemaphoreTake(((struct DelayedCallbackTaskStruct *)task)->signal, delay);
        }
    }
}

#endif // ifdef PIOS_INCLUDE_CALLBACKSCHEDULER