1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
34 #include "freertos_mpool.h" // osMemoryPool definitions
35 #include "freertos_os2.h" // Configuration check and setup
37 /*---------------------------------------------------------------------------*/
38 #ifndef __ARM_ARCH_6M__
39 #define __ARM_ARCH_6M__ 0
41 #ifndef __ARM_ARCH_7M__
42 #define __ARM_ARCH_7M__ 0
44 #ifndef __ARM_ARCH_7EM__
45 #define __ARM_ARCH_7EM__ 0
47 #ifndef __ARM_ARCH_8M_MAIN__
48 #define __ARM_ARCH_8M_MAIN__ 0
50 #ifndef __ARM_ARCH_7A__
51 #define __ARM_ARCH_7A__ 0
54 #if ((__ARM_ARCH_7M__ == 1U) || \
55 (__ARM_ARCH_7EM__ == 1U) || \
56 (__ARM_ARCH_8M_MAIN__ == 1U))
57 #define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
58 #elif (__ARM_ARCH_6M__ == 1U)
59 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
60 #elif (__ARM_ARCH_7A__ == 1U)
62 #define CPSR_MASKBIT_I 0x80U
64 #define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
66 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
69 #if (__ARM_ARCH_7A__ == 1U)
70 /* CPSR mode bitmasks */
71 #define CPSR_MODE_USER 0x10U
72 #define CPSR_MODE_SYSTEM 0x1FU
74 #define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
76 #define IS_IRQ_MODE() (__get_IPSR() != 0U)
79 #define IS_IRQ() (IS_IRQ_MODE() || (IS_IRQ_MASKED() && (KernelState == osKernelRunning)))
82 #define MAX_BITS_TASK_NOTIFY 31U
83 #define MAX_BITS_EVENT_GROUPS 24U
85 #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
86 #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
88 /* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
89 #define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
90 ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
91 ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
93 #define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
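/* Worked example of the encoding above: FreeRTOS kernel 10.3.1 would be reported as
   10*10000000 + 3*10000 + 1 = 100030001 (mm = 10, nnn = 003, rrrr = 0001). */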
95 /* Timer callback information structure definition */
101 /* Kernel initialization state */
102 static osKernelState_t KernelState = osKernelInactive;
105 Heap region definition used by the heap_5 variant.
107 Define configAPPLICATION_ALLOCATED_HEAP as a nonzero value in FreeRTOSConfig.h if
108 the heap regions are already defined and vPortDefineHeapRegions is called by the application.
110 Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using the
111 definition configHEAP_5_REGIONS as its parameter. configHEAP_5_REGIONS can be overridden
112 by defining it globally or in FreeRTOSConfig.h (see the illustrative sketch after this block).
114 #if defined(USE_FreeRTOS_HEAP_5)
115 #if (configAPPLICATION_ALLOCATED_HEAP == 0)
117 The FreeRTOS heap is not defined by the application.
118 A single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
119 is provided by default. Define configHEAP_5_REGIONS to provide custom heap regions.
122 #define HEAP_5_REGION_SETUP 1
124 #ifndef configHEAP_5_REGIONS
125 #define configHEAP_5_REGIONS xHeapRegions
127 static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
129 static HeapRegion_t xHeapRegions[] = {
130 { ucHeap, configTOTAL_HEAP_SIZE },
134 /* Global definition is provided to override default heap array */
135 extern HeapRegion_t configHEAP_5_REGIONS[];
139 The application already defined the array used for the FreeRTOS heap and
140 called vPortDefineHeapRegions to initialize heap.
142 #define HEAP_5_REGION_SETUP 0
143 #endif /* configAPPLICATION_ALLOCATED_HEAP */
144 #endif /* USE_FreeRTOS_HEAP_5 */
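/* Illustrative configuration sketch (not part of this wrapper): supplying custom
   heap_5 regions while keeping configAPPLICATION_ALLOCATED_HEAP at 0, so that
   osKernelInitialize performs the vPortDefineHeapRegions call. The symbols
   xAppHeapRegions, ucRAM1 and ucRAM2 are hypothetical application names.
   vPortDefineHeapRegions expects the regions ordered by ascending start address
   and terminated with a { NULL, 0 } entry. */
#if 0
/* In FreeRTOSConfig.h (or globally):
   #define configHEAP_5_REGIONS    xAppHeapRegions                              */

static uint8_t ucRAM1[16 * 1024];             /* first heap area (hypothetical)  */
static uint8_t ucRAM2[32 * 1024];             /* second heap area (hypothetical) */

/* Picked up by osKernelInitialize through the extern declaration above */
HeapRegion_t xAppHeapRegions[] = {
  { ucRAM1, sizeof(ucRAM1) },
  { ucRAM2, sizeof(ucRAM2) },
  { NULL,   0              }                  /* terminator                      */
};
#endif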
147 #undef SysTick_Handler
149 /* CMSIS SysTick interrupt handler prototype */
150 extern void SysTick_Handler (void);
151 /* FreeRTOS tick timer interrupt handler prototype */
152 extern void xPortSysTickHandler (void);
155 SysTick handler implementation that also clears the overflow flag.
157 void SysTick_Handler (void) {
158 /* Clear overflow flag */
161 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
162 /* Call tick handler */
163 xPortSysTickHandler();
169 Set up the SVC interrupt priority to its reset value.
171 __STATIC_INLINE void SVC_Setup (void) {
172 #if (__ARM_ARCH_7A__ == 0U)
173 /* Service Call interrupt might be configured before kernel start */
174 /* and when its priority is lower than or equal to BASEPRI, the SVC instruction */
175 /* causes a Hard Fault. */
176 NVIC_SetPriority (SVCall_IRQn, 0U);
181 Function macro used to retrieve the semaphore count from an ISR
183 #ifndef uxSemaphoreGetCountFromISR
184 #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
187 /*---------------------------------------------------------------------------*/
189 osStatus_t osKernelInitialize (void) {
196 if (KernelState == osKernelInactive) {
197 #if defined(USE_TRACE_EVENT_RECORDER)
198 EvrFreeRTOSSetup(0U);
200 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
201 vPortDefineHeapRegions (configHEAP_5_REGIONS);
203 KernelState = osKernelReady;
213 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
215 if (version != NULL) {
216 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
217 version->api = KERNEL_VERSION;
218 version->kernel = KERNEL_VERSION;
221 if ((id_buf != NULL) && (id_size != 0U)) {
222 if (id_size > sizeof(KERNEL_ID)) {
223 id_size = sizeof(KERNEL_ID);
225 memcpy(id_buf, KERNEL_ID, id_size);
231 osKernelState_t osKernelGetState (void) {
232 osKernelState_t state;
234 switch (xTaskGetSchedulerState()) {
235 case taskSCHEDULER_RUNNING:
236 state = osKernelRunning;
239 case taskSCHEDULER_SUSPENDED:
240 state = osKernelLocked;
243 case taskSCHEDULER_NOT_STARTED:
245 if (KernelState == osKernelReady) {
246 state = osKernelReady;
248 state = osKernelInactive;
256 osStatus_t osKernelStart (void) {
263 if (KernelState == osKernelReady) {
264 /* Ensure SVC priority is at the reset value */
266 /* Change state to enable IRQ masking check */
267 KernelState = osKernelRunning;
268 /* Start the kernel scheduler */
269 vTaskStartScheduler();
279 int32_t osKernelLock (void) {
283 lock = (int32_t)osErrorISR;
286 switch (xTaskGetSchedulerState()) {
287 case taskSCHEDULER_SUSPENDED:
291 case taskSCHEDULER_RUNNING:
296 case taskSCHEDULER_NOT_STARTED:
298 lock = (int32_t)osError;
306 int32_t osKernelUnlock (void) {
310 lock = (int32_t)osErrorISR;
313 switch (xTaskGetSchedulerState()) {
314 case taskSCHEDULER_SUSPENDED:
317 if (xTaskResumeAll() != pdTRUE) {
318 if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
319 lock = (int32_t)osError;
324 case taskSCHEDULER_RUNNING:
328 case taskSCHEDULER_NOT_STARTED:
330 lock = (int32_t)osError;
338 int32_t osKernelRestoreLock (int32_t lock) {
341 lock = (int32_t)osErrorISR;
344 switch (xTaskGetSchedulerState()) {
345 case taskSCHEDULER_SUSPENDED:
346 case taskSCHEDULER_RUNNING:
352 lock = (int32_t)osError;
355 if (xTaskResumeAll() != pdTRUE) {
356 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
357 lock = (int32_t)osError;
364 case taskSCHEDULER_NOT_STARTED:
366 lock = (int32_t)osError;
374 uint32_t osKernelGetTickCount (void) {
378 ticks = xTaskGetTickCountFromISR();
380 ticks = xTaskGetTickCount();
386 uint32_t osKernelGetTickFreq (void) {
387 return (configTICK_RATE_HZ);
390 uint32_t osKernelGetSysTimerCount (void) {
391 uint32_t irqmask = IS_IRQ_MASKED();
397 ticks = xTaskGetTickCount();
398 val = OS_Tick_GetCount();
400 if (OS_Tick_GetOverflow() != 0U) {
401 val = OS_Tick_GetCount();
404 val += ticks * OS_Tick_GetInterval();
413 uint32_t osKernelGetSysTimerFreq (void) {
414 return (configCPU_CLOCK_HZ);
417 /*---------------------------------------------------------------------------*/
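/* Illustrative startup sequence (hypothetical application code, not part of this
   wrapper): initialize the kernel, query its identity, create one thread and hand
   control to the scheduler. app_main is an assumed application function. */
#if 0
static void app_main (void *argument) {        /* first application thread */
  (void)argument;
  for (;;) {
    osDelay(1000U);
  }
}

int main (void) {
  osVersion_t version;
  char id[32];

  osKernelInitialize();                        /* KernelState -> osKernelReady */

  if (osKernelGetInfo(&version, id, sizeof(id)) == osOK) {
    /* id now holds "FreeRTOS <x.y.z>", version.kernel holds the mmnnnrrrr value */
  }

  osThreadNew(app_main, NULL, NULL);           /* defaults: osPriorityNormal, configMINIMAL_STACK_SIZE */
  osKernelStart();                             /* calls vTaskStartScheduler(); does not return on success */

  for (;;) {}                                  /* reached only if the scheduler could not start */
}
#endif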
419 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
428 if (!IS_IRQ() && (func != NULL)) {
429 stack = configMINIMAL_STACK_SIZE;
430 prio = (UBaseType_t)osPriorityNormal;
436 if (attr->name != NULL) {
439 if (attr->priority != osPriorityNone) {
440 prio = (UBaseType_t)attr->priority;
443 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
447 if (attr->stack_size > 0U) {
448 /* In FreeRTOS the stack is specified not in bytes but in units of sizeof(StackType_t), which is 4 on Arm ports. */
449 /* The stack size should therefore be 4-byte aligned to avoid side effects from the division below.              */
450 stack = attr->stack_size / sizeof(StackType_t);
453 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
454 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
458 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
468 hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem,
469 (StaticTask_t *)attr->cb_mem);
473 if (xTaskCreate ((TaskFunction_t)func, name, (uint16_t)stack, argument, prio, &hTask) != pdPASS) {
480 return ((osThreadId_t)hTask);
483 const char *osThreadGetName (osThreadId_t thread_id) {
484 TaskHandle_t hTask = (TaskHandle_t)thread_id;
487 if (IS_IRQ() || (hTask == NULL)) {
490 name = pcTaskGetName (hTask);
496 osThreadId_t osThreadGetId (void) {
499 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
504 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
505 TaskHandle_t hTask = (TaskHandle_t)thread_id;
506 osThreadState_t state;
508 if (IS_IRQ() || (hTask == NULL)) {
509 state = osThreadError;
512 switch (eTaskGetState (hTask)) {
513 case eRunning: state = osThreadRunning; break;
514 case eReady: state = osThreadReady; break;
516 case eSuspended: state = osThreadBlocked; break;
517 case eDeleted: state = osThreadTerminated; break;
519 default: state = osThreadError; break;
526 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
527 TaskHandle_t hTask = (TaskHandle_t)thread_id;
530 if (IS_IRQ() || (hTask == NULL)) {
533 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
539 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
540 TaskHandle_t hTask = (TaskHandle_t)thread_id;
546 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
547 stat = osErrorParameter;
551 vTaskPrioritySet (hTask, (UBaseType_t)priority);
557 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
558 TaskHandle_t hTask = (TaskHandle_t)thread_id;
561 if (IS_IRQ() || (hTask == NULL)) {
562 prio = osPriorityError;
564 prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
570 osStatus_t osThreadYield (void) {
583 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
584 TaskHandle_t hTask = (TaskHandle_t)thread_id;
590 else if (hTask == NULL) {
591 stat = osErrorParameter;
595 vTaskSuspend (hTask);
601 osStatus_t osThreadResume (osThreadId_t thread_id) {
602 TaskHandle_t hTask = (TaskHandle_t)thread_id;
608 else if (hTask == NULL) {
609 stat = osErrorParameter;
619 __NO_RETURN void osThreadExit (void) {
620 #ifndef USE_FreeRTOS_HEAP_1
626 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
627 TaskHandle_t hTask = (TaskHandle_t)thread_id;
629 #ifndef USE_FreeRTOS_HEAP_1
635 else if (hTask == NULL) {
636 stat = osErrorParameter;
639 tstate = eTaskGetState (hTask);
641 if (tstate != eDeleted) {
645 stat = osErrorResource;
655 uint32_t osThreadGetCount (void) {
661 count = uxTaskGetNumberOfTasks();
667 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
671 if (IS_IRQ() || (thread_array == NULL) || (array_items == 0U)) {
676 count = uxTaskGetNumberOfTasks();
677 task = pvPortMalloc (count * sizeof(TaskStatus_t));
680 count = uxTaskGetSystemState (task, count, NULL);
682 for (i = 0U; (i < count) && (i < array_items); i++) {
683 thread_array[i] = (osThreadId_t)task[i].xHandle;
687 (void)xTaskResumeAll();
695 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
696 TaskHandle_t hTask = (TaskHandle_t)thread_id;
700 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
701 rflags = (uint32_t)osErrorParameter;
704 rflags = (uint32_t)osError;
709 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
710 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
712 portYIELD_FROM_ISR (yield);
715 (void)xTaskNotify (hTask, flags, eSetBits);
716 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
719 /* Return flags after setting */
723 uint32_t osThreadFlagsClear (uint32_t flags) {
725 uint32_t rflags, cflags;
728 rflags = (uint32_t)osErrorISR;
730 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
731 rflags = (uint32_t)osErrorParameter;
734 hTask = xTaskGetCurrentTaskHandle();
736 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
740 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
741 rflags = (uint32_t)osError;
745 rflags = (uint32_t)osError;
749 /* Return flags before clearing */
753 uint32_t osThreadFlagsGet (void) {
758 rflags = (uint32_t)osErrorISR;
761 hTask = xTaskGetCurrentTaskHandle();
763 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
764 rflags = (uint32_t)osError;
771 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
772 uint32_t rflags, nval;
774 TickType_t t0, td, tout;
778 rflags = (uint32_t)osErrorISR;
780 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
781 rflags = (uint32_t)osErrorParameter;
784 if ((options & osFlagsNoClear) == osFlagsNoClear) {
793 t0 = xTaskGetTickCount();
795 rval = xTaskNotifyWait (0, clear, &nval, tout);
797 if (rval == pdPASS) {
801 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
802 if ((flags & rflags) == flags) {
806 rflags = (uint32_t)osErrorResource;
812 if ((flags & rflags) != 0) {
816 rflags = (uint32_t)osErrorResource;
823 td = xTaskGetTickCount() - t0;
833 rflags = (uint32_t)osErrorResource;
835 rflags = (uint32_t)osErrorTimeout;
839 while (rval != pdFAIL);
842 /* Return flags before clearing */
846 osStatus_t osDelay (uint32_t ticks) {
863 osStatus_t osDelayUntil (uint32_t ticks) {
864 TickType_t tcnt, delay;
872 tcnt = xTaskGetTickCount();
874 /* Determine remaining number of ticks to delay */
875 delay = (TickType_t)ticks - tcnt;
877 /* Check if target tick has not expired */
878 if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
879 vTaskDelayUntil (&tcnt, delay);
883 /* No delay or already expired */
884 stat = osErrorParameter;
891 /*---------------------------------------------------------------------------*/
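/* Illustrative sketch (hypothetical application code): a statically allocated thread
   running a fixed-rate loop with osDelayUntil. Worker, worker_cb and worker_stack are
   assumed names; per the checks in osThreadNew above, cb_size must be at least
   sizeof(StaticTask_t) and stack_size is given in bytes. */
#if 0
static StaticTask_t worker_cb;                           /* thread control block       */
static StackType_t  worker_stack[256];                   /* stack in StackType_t units */

static const osThreadAttr_t worker_attr = {
  .name       = "Worker",
  .cb_mem     = &worker_cb,
  .cb_size    = sizeof(worker_cb),
  .stack_mem  = worker_stack,
  .stack_size = sizeof(worker_stack),                    /* bytes; divided by sizeof(StackType_t) above */
  .priority   = osPriorityAboveNormal
};

static void Worker (void *argument) {
  uint32_t next = osKernelGetTickCount();
  (void)argument;
  for (;;) {
    next += 10U;                                         /* run every 10 kernel ticks */
    /* ... periodic work ... */
    osDelayUntil(next);
  }
}

/* at startup: osThreadId_t tid = osThreadNew(Worker, NULL, &worker_attr); */
#endif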
893 static void TimerCallback (TimerHandle_t hTimer) {
894 TimerCallback_t *callb;
896 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
899 callb->func (callb->arg);
903 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
905 TimerHandle_t hTimer;
906 TimerCallback_t *callb;
912 if (!IS_IRQ() && (func != NULL)) {
913 /* Allocate memory to store callback function and argument */
914 callb = pvPortMalloc (sizeof(TimerCallback_t));
918 callb->arg = argument;
920 if (type == osTimerOnce) {
930 if (attr->name != NULL) {
934 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
938 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
948 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
952 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
958 return ((osTimerId_t)hTimer);
961 const char *osTimerGetName (osTimerId_t timer_id) {
962 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
965 if (IS_IRQ() || (hTimer == NULL)) {
968 p = pcTimerGetName (hTimer);
974 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
975 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
981 else if (hTimer == NULL) {
982 stat = osErrorParameter;
985 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
988 stat = osErrorResource;
995 osStatus_t osTimerStop (osTimerId_t timer_id) {
996 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1002 else if (hTimer == NULL) {
1003 stat = osErrorParameter;
1006 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1007 stat = osErrorResource;
1010 if (xTimerStop (hTimer, 0) == pdPASS) {
1021 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1022 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1025 if (IS_IRQ() || (hTimer == NULL)) {
1028 running = (uint32_t)xTimerIsTimerActive (hTimer);
1034 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1035 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1037 #ifndef USE_FreeRTOS_HEAP_1
1038 TimerCallback_t *callb;
1043 else if (hTimer == NULL) {
1044 stat = osErrorParameter;
1047 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1049 if (xTimerDelete (hTimer, 0) == pdPASS) {
1053 stat = osErrorResource;
1063 /*---------------------------------------------------------------------------*/
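/* Illustrative sketch (hypothetical application code): a periodic software timer.
   As implemented above, osTimerNew stores the callback in a heap-allocated
   TimerCallback_t and creates the timer with a dummy period of 1 tick; the real
   period is set by osTimerStart. BlinkCallback and StartBlinkTimer are assumed names. */
#if 0
static void BlinkCallback (void *argument) {             /* runs in the FreeRTOS timer task context */
  (void)argument;
  /* ... toggle an LED, refresh a watchdog, ... */
}

void StartBlinkTimer (void) {
  osTimerId_t tid = osTimerNew(BlinkCallback, osTimerPeriodic, NULL, NULL);

  if (tid != NULL) {
    osTimerStart(tid, 100U);                             /* period: 100 kernel ticks */
  }
}
#endif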
1065 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1066 EventGroupHandle_t hEventGroup;
1075 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1079 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1089 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1093 hEventGroup = xEventGroupCreate();
1098 return ((osEventFlagsId_t)hEventGroup);
1101 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1102 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1106 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1107 rflags = (uint32_t)osErrorParameter;
1109 else if (IS_IRQ()) {
1112 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1113 rflags = (uint32_t)osErrorResource;
1116 portYIELD_FROM_ISR (yield);
1120 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1126 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1127 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1130 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1131 rflags = (uint32_t)osErrorParameter;
1133 else if (IS_IRQ()) {
1134 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1136 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1137 rflags = (uint32_t)osErrorResource;
1141 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1147 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1148 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1151 if (ef_id == NULL) {
1154 else if (IS_IRQ()) {
1155 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1158 rflags = xEventGroupGetBits (hEventGroup);
1164 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1165 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1166 BaseType_t wait_all;
1167 BaseType_t exit_clr;
1170 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1171 rflags = (uint32_t)osErrorParameter;
1173 else if (IS_IRQ()) {
1174 rflags = (uint32_t)osErrorISR;
1177 if (options & osFlagsWaitAll) {
1183 if (options & osFlagsNoClear) {
1189 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1191 if (options & osFlagsWaitAll) {
1192 if ((flags & rflags) != flags) {
1194 rflags = (uint32_t)osErrorTimeout;
1196 rflags = (uint32_t)osErrorResource;
1201 if ((flags & rflags) == 0U) {
1203 rflags = (uint32_t)osErrorTimeout;
1205 rflags = (uint32_t)osErrorResource;
1214 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1215 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1218 #ifndef USE_FreeRTOS_HEAP_1
1222 else if (hEventGroup == NULL) {
1223 stat = osErrorParameter;
1227 vEventGroupDelete (hEventGroup);
1236 /*---------------------------------------------------------------------------*/
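/* Illustrative sketch (hypothetical application code): signalling a thread from an
   interrupt with event flags. Only the lower 24 bits may be used (see
   MAX_BITS_EVENT_GROUPS above); FLAG_RX, evt and the handler names are assumed. */
#if 0
#define FLAG_RX   0x0001U                                 /* stays within the valid 24 flag bits */

static osEventFlagsId_t evt;

void AppInitEvents (void) {
  evt = osEventFlagsNew(NULL);
}

void UART_RX_IRQHandler (void) {                          /* hypothetical interrupt handler */
  osEventFlagsSet(evt, FLAG_RX);                          /* uses the xEventGroupSetBitsFromISR path */
}

void RxThread (void *argument) {
  (void)argument;
  for (;;) {
    uint32_t flags = osEventFlagsWait(evt, FLAG_RX, osFlagsWaitAny, osWaitForever);
    if (flags < 0x80000000U) {                            /* not an osErrorXxx code */
      /* ... process received data ... */
    }
  }
}
#endif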
1238 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1239 SemaphoreHandle_t hMutex;
1243 #if (configQUEUE_REGISTRY_SIZE > 0)
1251 type = attr->attr_bits;
1256 if ((type & osMutexRecursive) == osMutexRecursive) {
1262 if ((type & osMutexRobust) != osMutexRobust) {
1266 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1270 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1281 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1284 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1290 hMutex = xSemaphoreCreateRecursiveMutex ();
1292 hMutex = xSemaphoreCreateMutex ();
1297 #if (configQUEUE_REGISTRY_SIZE > 0)
1298 if (hMutex != NULL) {
1304 vQueueAddToRegistry (hMutex, name);
1308 if ((hMutex != NULL) && (rmtx != 0U)) {
1309 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1314 return ((osMutexId_t)hMutex);
1317 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1318 SemaphoreHandle_t hMutex;
1322 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1324 rmtx = (uint32_t)mutex_id & 1U;
1331 else if (hMutex == NULL) {
1332 stat = osErrorParameter;
1336 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1337 if (timeout != 0U) {
1338 stat = osErrorTimeout;
1340 stat = osErrorResource;
1345 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1346 if (timeout != 0U) {
1347 stat = osErrorTimeout;
1349 stat = osErrorResource;
1358 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1359 SemaphoreHandle_t hMutex;
1363 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1365 rmtx = (uint32_t)mutex_id & 1U;
1372 else if (hMutex == NULL) {
1373 stat = osErrorParameter;
1377 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1378 stat = osErrorResource;
1382 if (xSemaphoreGive (hMutex) != pdPASS) {
1383 stat = osErrorResource;
1391 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1392 SemaphoreHandle_t hMutex;
1395 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1397 if (IS_IRQ() || (hMutex == NULL)) {
1400 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1406 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1408 #ifndef USE_FreeRTOS_HEAP_1
1409 SemaphoreHandle_t hMutex;
1411 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1416 else if (hMutex == NULL) {
1417 stat = osErrorParameter;
1420 #if (configQUEUE_REGISTRY_SIZE > 0)
1421 vQueueUnregisterQueue (hMutex);
1424 vSemaphoreDelete (hMutex);
1433 /*---------------------------------------------------------------------------*/
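/* Illustrative sketch (hypothetical application code): guarding a shared resource with
   a recursive mutex. As osMutexNew above shows, the recursive variant is encoded in
   bit 0 of the returned handle, so the same osMutexAcquire/osMutexRelease calls serve
   both kinds. log_mutex and LogLine are assumed names. */
#if 0
static const osMutexAttr_t log_mutex_attr = {
  .name      = "LogMutex",
  .attr_bits = osMutexRecursive | osMutexPrioInherit      /* FreeRTOS mutexes always inherit priority */
};

static osMutexId_t log_mutex;                             /* created once: osMutexNew(&log_mutex_attr) */

void LogLine (const char *text) {
  if (osMutexAcquire(log_mutex, osWaitForever) == osOK) {
    /* ... write text to the shared log ... */
    (void)text;
    osMutexRelease(log_mutex);
  }
}
#endif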
1435 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1436 SemaphoreHandle_t hSemaphore;
1438 #if (configQUEUE_REGISTRY_SIZE > 0)
1444 if (!IS_IRQ() && (max_count > 0U) && (initial_count <= max_count)) {
1448 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1452 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1462 if (max_count == 1U) {
1464 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1467 hSemaphore = xSemaphoreCreateBinary();
1470 if ((hSemaphore != NULL) && (initial_count != 0U)) {
1471 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1472 vSemaphoreDelete (hSemaphore);
1479 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
1482 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
1486 #if (configQUEUE_REGISTRY_SIZE > 0)
1487 if (hSemaphore != NULL) {
1493 vQueueAddToRegistry (hSemaphore, name);
1499 return ((osSemaphoreId_t)hSemaphore);
1502 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
1503 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1509 if (hSemaphore == NULL) {
1510 stat = osErrorParameter;
1512 else if (IS_IRQ()) {
1513 if (timeout != 0U) {
1514 stat = osErrorParameter;
1519 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
1520 stat = osErrorResource;
1522 portYIELD_FROM_ISR (yield);
1527 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
1528 if (timeout != 0U) {
1529 stat = osErrorTimeout;
1531 stat = osErrorResource;
1539 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
1540 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1546 if (hSemaphore == NULL) {
1547 stat = osErrorParameter;
1549 else if (IS_IRQ()) {
1552 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
1553 stat = osErrorResource;
1555 portYIELD_FROM_ISR (yield);
1559 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1560 stat = osErrorResource;
1567 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
1568 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1571 if (hSemaphore == NULL) {
1574 else if (IS_IRQ()) {
1575 count = uxQueueMessagesWaitingFromISR (hSemaphore);
1577 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
1583 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
1584 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1587 #ifndef USE_FreeRTOS_HEAP_1
1591 else if (hSemaphore == NULL) {
1592 stat = osErrorParameter;
1595 #if (configQUEUE_REGISTRY_SIZE > 0)
1596 vQueueUnregisterQueue (hSemaphore);
1600 vSemaphoreDelete (hSemaphore);
1609 /*---------------------------------------------------------------------------*/
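/* Illustrative sketch (hypothetical application code): a counting semaphore released
   from an interrupt and consumed by a thread. Per osSemaphoreAcquire above, calls from
   an ISR must use a zero timeout. dma_sem and the handler names are assumed. */
#if 0
static osSemaphoreId_t dma_sem;

void AppInitSem (void) {
  dma_sem = osSemaphoreNew(8U, 0U, NULL);                  /* max_count 8, initially empty */
}

void DMA_IRQHandler (void) {                               /* hypothetical interrupt handler */
  osSemaphoreRelease(dma_sem);                             /* xSemaphoreGiveFromISR + yield request */
}

void ConsumerThread (void *argument) {
  (void)argument;
  for (;;) {
    if (osSemaphoreAcquire(dma_sem, osWaitForever) == osOK) {
      /* ... handle one completed transfer ... */
    }
  }
}
#endif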
1611 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
1612 QueueHandle_t hQueue;
1614 #if (configQUEUE_REGISTRY_SIZE > 0)
1620 if (!IS_IRQ() && (msg_count > 0U) && (msg_size > 0U)) {
1624 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
1625 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
1629 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
1630 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
1640 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
1644 hQueue = xQueueCreate (msg_count, msg_size);
1648 #if (configQUEUE_REGISTRY_SIZE > 0)
1649 if (hQueue != NULL) {
1655 vQueueAddToRegistry (hQueue, name);
1661 return ((osMessageQueueId_t)hQueue);
1664 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
1665 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1669 (void)msg_prio; /* Message priority is ignored */
1674 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
1675 stat = osErrorParameter;
1680 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
1681 stat = osErrorResource;
1683 portYIELD_FROM_ISR (yield);
1688 if ((hQueue == NULL) || (msg_ptr == NULL)) {
1689 stat = osErrorParameter;
1692 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
1693 if (timeout != 0U) {
1694 stat = osErrorTimeout;
1696 stat = osErrorResource;
1705 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
1706 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1710 (void)msg_prio; /* Message priority is ignored */
1715 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
1716 stat = osErrorParameter;
1721 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
1722 stat = osErrorResource;
1724 portYIELD_FROM_ISR (yield);
1729 if ((hQueue == NULL) || (msg_ptr == NULL)) {
1730 stat = osErrorParameter;
1733 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
1734 if (timeout != 0U) {
1735 stat = osErrorTimeout;
1737 stat = osErrorResource;
1746 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
1747 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1753 /* capacity = pxQueue->uxLength */
1754 capacity = mq->uxDummy4[1];
1760 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
1761 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1767 /* size = pxQueue->uxItemSize */
1768 size = mq->uxDummy4[2];
1774 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
1775 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1778 if (hQueue == NULL) {
1781 else if (IS_IRQ()) {
1782 count = uxQueueMessagesWaitingFromISR (hQueue);
1785 count = uxQueueMessagesWaiting (hQueue);
1788 return ((uint32_t)count);
1791 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
1792 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1799 else if (IS_IRQ()) {
1800 isrm = taskENTER_CRITICAL_FROM_ISR();
1802 /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
1803 space = mq->uxDummy4[1] - mq->uxDummy4[0];
1805 taskEXIT_CRITICAL_FROM_ISR(isrm);
1808 space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
1814 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
1815 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1821 else if (hQueue == NULL) {
1822 stat = osErrorParameter;
1826 (void)xQueueReset (hQueue);
1832 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
1833 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1836 #ifndef USE_FreeRTOS_HEAP_1
1840 else if (hQueue == NULL) {
1841 stat = osErrorParameter;
1844 #if (configQUEUE_REGISTRY_SIZE > 0)
1845 vQueueUnregisterQueue (hQueue);
1849 vQueueDelete (hQueue);
1858 /*---------------------------------------------------------------------------*/
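/* Illustrative sketch (hypothetical application code): passing fixed-size messages
   between a producer and a consumer. msg_prio is ignored by this wrapper, and calls
   from an ISR must use a zero timeout (see the parameter checks above). app_msg_t,
   mq and the thread names are assumed. */
#if 0
typedef struct {
  uint8_t  cmd;
  uint32_t value;
} app_msg_t;

static osMessageQueueId_t mq;                              /* created once: osMessageQueueNew(16U, sizeof(app_msg_t), NULL) */

void Producer (void *argument) {
  app_msg_t msg = { .cmd = 1U, .value = 42U };
  (void)argument;
  for (;;) {
    osMessageQueuePut(mq, &msg, 0U, osWaitForever);        /* message is copied into the queue */
    osDelay(100U);
  }
}

void Consumer (void *argument) {
  app_msg_t msg;
  (void)argument;
  for (;;) {
    if (osMessageQueueGet(mq, &msg, NULL, osWaitForever) == osOK) {
      /* ... act on msg.cmd / msg.value ... */
    }
  }
}
#endif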
1859 #ifdef FREERTOS_MPOOL_H_
1861 /* Static memory pool functions */
1862 static void FreeBlock (MemPool_t *mp, void *block);
1863 static void *AllocBlock (MemPool_t *mp);
1864 static void *CreateBlock (MemPool_t *mp);
1866 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
1869 int32_t mem_cb, mem_mp;
1871 SemaphoreHandle_t hSemaphore;
1876 else if ((block_count == 0U) || (block_size == 0U)) {
1881 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
1888 if (attr->name != NULL) {
1892 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
1893 /* Static control block is provided */
1896 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1897 /* Allocate control block memory on heap */
1901 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
1902 /* Allocate memory array on heap */
1906 if (attr->mp_mem != NULL) {
1907 /* Check if array is 4-byte aligned */
1908 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
1909 /* Check if array big enough */
1910 if (attr->mp_size >= sz) {
1911 /* Static memory pool array is provided */
1919 /* Attributes not provided, allocate memory on heap */
1925 mp = pvPortMalloc (sizeof(MemPool_t));
1931 /* Create a semaphore (max count == initial count == block_count) */
1932 hSemaphore = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->sem);
1934 if (hSemaphore == (SemaphoreHandle_t)&mp->sem) {
1935 /* Setup memory array */
1937 mp->mem_arr = pvPortMalloc (sz);
1939 mp->mem_arr = attr->mp_mem;
1944 if ((mp != NULL) && (mp->mem_arr != NULL)) {
1945 /* Memory pool can be created */
1949 mp->bl_sz = block_size;
1950 mp->bl_cnt = block_count;
1953 /* Set heap allocated memory flags */
1954 mp->status = MPOOL_STATUS;
1957 /* Control block on heap */
1961 /* Memory array on heap */
1966 /* Memory pool cannot be created, release allocated resources */
1967 if ((mem_cb == 0) && (mp != NULL)) {
1968 /* Free control block memory */
1978 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
1979 MemPool_t *mp = (MemPool_t *)mp_id;
1985 else if (mp_id == NULL) {
1995 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2000 if (mp_id == NULL) {
2001 /* Invalid input parameters */
2007 mp = (MemPool_t *)mp_id;
2009 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2011 if (timeout == 0U) {
2012 if (xSemaphoreTakeFromISR ((SemaphoreHandle_t)&mp->sem, NULL) == pdTRUE) {
2013 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2014 isrm = taskENTER_CRITICAL_FROM_ISR();
2016 /* Get a block from the free-list */
2017 block = AllocBlock(mp);
2019 if (block == NULL) {
2020 /* List of free blocks is empty, 'create' new block */
2021 block = CreateBlock(mp);
2024 taskEXIT_CRITICAL_FROM_ISR(isrm);
2030 if (xSemaphoreTake ((SemaphoreHandle_t)&mp->sem, timeout) == pdTRUE) {
2031 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2032 taskENTER_CRITICAL();
2034 /* Get a block from the free-list */
2035 block = AllocBlock(mp);
2037 if (block == NULL) {
2038 /* List of free blocks is empty, 'create' new block */
2039 block = CreateBlock(mp);
2042 taskEXIT_CRITICAL();
2052 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2058 if ((mp_id == NULL) || (block == NULL)) {
2059 /* Invalid input parameters */
2060 stat = osErrorParameter;
2063 mp = (MemPool_t *)mp_id;
2065 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2066 /* Invalid object status */
2067 stat = osErrorResource;
2069 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2070 /* Block pointer outside of memory array area */
2071 stat = osErrorParameter;
2077 if (uxSemaphoreGetCountFromISR ((SemaphoreHandle_t)&mp->sem) == mp->bl_cnt) {
2078 stat = osErrorResource;
2081 isrm = taskENTER_CRITICAL_FROM_ISR();
2083 /* Add block to the list of free blocks */
2084 FreeBlock(mp, block);
2086 taskEXIT_CRITICAL_FROM_ISR(isrm);
2089 xSemaphoreGiveFromISR ((SemaphoreHandle_t)&mp->sem, &yield);
2090 portYIELD_FROM_ISR (yield);
2094 if (uxSemaphoreGetCount ((SemaphoreHandle_t)&mp->sem) == mp->bl_cnt) {
2095 stat = osErrorResource;
2098 taskENTER_CRITICAL();
2100 /* Add block to the list of free blocks */
2101 FreeBlock(mp, block);
2103 taskEXIT_CRITICAL();
2105 xSemaphoreGive ((SemaphoreHandle_t)&mp->sem);
2114 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2118 if (mp_id == NULL) {
2119 /* Invalid input parameters */
2123 mp = (MemPool_t *)mp_id;
2125 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2126 /* Invalid object status */
2134 /* Return maximum number of memory blocks */
2138 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2142 if (mp_id == NULL) {
2143 /* Invalid input parameters */
2147 mp = (MemPool_t *)mp_id;
2149 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2150 /* Invalid object status */
2158 /* Return memory block size in bytes */
2162 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2166 if (mp_id == NULL) {
2167 /* Invalid input parameters */
2171 mp = (MemPool_t *)mp_id;
2173 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2174 /* Invalid object status */
2179 n = uxSemaphoreGetCountFromISR ((SemaphoreHandle_t)&mp->sem);
2181 n = uxSemaphoreGetCount ((SemaphoreHandle_t)&mp->sem);
2188 /* Return number of memory blocks used */
2192 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2196 if (mp_id == NULL) {
2197 /* Invalid input parameters */
2201 mp = (MemPool_t *)mp_id;
2203 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2204 /* Invalid object status */
2209 n = uxSemaphoreGetCountFromISR ((SemaphoreHandle_t)&mp->sem);
2211 n = uxSemaphoreGetCount ((SemaphoreHandle_t)&mp->sem);
2216 /* Return number of memory blocks available */
2220 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2224 if (mp_id == NULL) {
2225 /* Invalid input parameters */
2226 stat = osErrorParameter;
2228 else if (IS_IRQ()) {
2232 mp = (MemPool_t *)mp_id;
2234 taskENTER_CRITICAL();
2236 /* Invalidate control block status */
2237 mp->status = mp->status & 3U;
2239 /* Wake-up tasks waiting for pool semaphore */
2240 while (xSemaphoreGive ((SemaphoreHandle_t)&mp->sem) == pdTRUE);
2246 if ((mp->status & 2U) != 0U) {
2247 /* Memory pool array allocated on heap */
2248 vPortFree (mp->mem_arr);
2250 if ((mp->status & 1U) != 0U) {
2251 /* Memory pool control block allocated on heap */
2255 taskEXIT_CRITICAL();
2264 Create a new block according to the current block index.
2266 static void *CreateBlock (MemPool_t *mp) {
2267 MPOOL_BLOCK *p = NULL;
2269 if (mp->n < mp->bl_cnt) {
2270 /* Unallocated blocks exist, set pointer to new block */
2271 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2273 /* Increment block index */
2281 Allocate a block by taking it from the list of free blocks.
2283 static void *AllocBlock (MemPool_t *mp) {
2284 MPOOL_BLOCK *p = NULL;
2286 if (mp->head != NULL) {
2287 /* List of free blocks exists, get the head block */
2290 /* Head block is now next on the list */
2298 Free a block by returning it to the list of free blocks.
2300 static void FreeBlock (MemPool_t *mp, void *block) {
2301 MPOOL_BLOCK *p = block;
2303 /* Store current head into block memory space */
2306 /* Store current block as new head */
2309 #endif /* FREERTOS_MPOOL_H_ */
2310 /*---------------------------------------------------------------------------*/
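/* Illustrative sketch (hypothetical application code): a fixed-size block pool as
   implemented above (counting semaphore plus a free list over a block array). Per the
   CMSIS-RTOS2 API, osMemoryPoolAlloc may be used from an ISR only with a zero timeout.
   app_block_t, pool and the helper names are assumed. */
#if 0
typedef struct {
  uint32_t id;
  uint8_t  payload[28];
} app_block_t;

static osMemoryPoolId_t pool;

void AppInitPool (void) {
  pool = osMemoryPoolNew(8U, sizeof(app_block_t), NULL);   /* 8 blocks of 32 bytes */
}

void UseOneBlock (void) {
  app_block_t *blk = osMemoryPoolAlloc(pool, osWaitForever);

  if (blk != NULL) {
    blk->id = 1U;
    /* ... fill and use the block ... */
    osMemoryPoolFree(pool, blk);
  }
}
#endif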
2312 /* Callback function prototypes */
2313 extern void vApplicationIdleHook (void);
2314 extern void vApplicationTickHook (void);
2315 extern void vApplicationMallocFailedHook (void);
2316 extern void vApplicationDaemonTaskStartupHook (void);
2317 extern void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName);
2320 Dummy implementation of the callback function vApplicationIdleHook().
2322 #if (configUSE_IDLE_HOOK == 1)
2323 __WEAK void vApplicationIdleHook (void){}
2327 Dummy implementation of the callback function vApplicationTickHook().
2329 #if (configUSE_TICK_HOOK == 1)
2330 __WEAK void vApplicationTickHook (void){}
2334 Dummy implementation of the callback function vApplicationMallocFailedHook().
2336 #if (configUSE_MALLOC_FAILED_HOOK == 1)
2337 __WEAK void vApplicationMallocFailedHook (void){}
2341 Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
2343 #if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
2344 __WEAK void vApplicationDaemonTaskStartupHook (void){}
2348 Dummy implementation of the callback function vApplicationStackOverflowHook().
2350 #if (configCHECK_FOR_STACK_OVERFLOW > 0)
2351 __WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName) {
2358 /*---------------------------------------------------------------------------*/
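/* Illustrative sketch (hypothetical application code): the __WEAK dummies above can be
   overridden simply by defining the hook in application code, for example to trap
   stack overflows when configCHECK_FOR_STACK_OVERFLOW is enabled. */
#if 0
void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName) {
  (void)xTask;
  (void)pcTaskName;                                        /* name of the offending task */
  /* ... record the task name, then stop: a corrupted stack cannot be recovered ... */
  taskDISABLE_INTERRUPTS();
  for (;;) {}
}
#endif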
2360 /* External Idle and Timer task static memory allocation functions */
2361 extern void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize);
2362 extern void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize);
2365 vApplicationGetIdleTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
2366 is set to 1 and is required for static memory allocation support.
2368 __WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
2369 /* Idle task control block and stack */
2370 static StaticTask_t Idle_TCB;
2371 static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
2373 *ppxIdleTaskTCBBuffer = &Idle_TCB;
2374 *ppxIdleTaskStackBuffer = &Idle_Stack[0];
2375 *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
2379 vApplicationGetTimerTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
2380 is set to 1 and is required for static memory allocation support.
2382 __WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
2383 /* Timer task control block and stack */
2384 static StaticTask_t Timer_TCB;
2385 static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
2387 *ppxTimerTaskTCBBuffer = &Timer_TCB;
2388 *ppxTimerTaskStackBuffer = &Timer_Stack[0];
2389 *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;
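/* Illustrative sketch (hypothetical application code): with configSUPPORT_STATIC_ALLOCATION
   set to 1 an application may override the __WEAK providers above, e.g. to enlarge the idle
   task stack or place it in a dedicated RAM section. The doubled stack size below is an
   assumption, not a requirement. */
#if 0
void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
  static StaticTask_t App_Idle_TCB;
  static StackType_t  App_Idle_Stack[2 * configMINIMAL_STACK_SIZE];

  *ppxIdleTaskTCBBuffer   = &App_Idle_TCB;
  *ppxIdleTaskStackBuffer = &App_Idle_Stack[0];
  *pulIdleTaskStackSize   = (uint32_t)(2 * configMINIMAL_STACK_SIZE);
}
#endif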