/* --------------------------------------------------------------------------
 * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
 *
 *---------------------------------------------------------------------------*/
#include <string.h>                     // memcpy

#include "RTE_Components.h"             // Component selection
#include CMSIS_device_header            // Device header (SysTick, core intrinsics)

#include "cmsis_os2.h"                  // ::CMSIS:RTOS2
#include "cmsis_compiler.h"
#include "os_tick.h"                    // OS Tick API (OS_Tick_GetCount, ...)
#include "freertos_evr.h"               // Event Recorder annotations (EvrFreeRTOSSetup)

#include "FreeRTOS.h"                   // ARM.FreeRTOS::RTOS:Core
#include "task.h"                       // ARM.FreeRTOS::RTOS:Core
#include "event_groups.h"               // ARM.FreeRTOS::RTOS:Event Groups
#include "semphr.h"                     // ARM.FreeRTOS::RTOS:Core
#include "timers.h"                     // ARM.FreeRTOS::RTOS:Timers
/*---------------------------------------------------------------------------*/

#ifndef   __ARM_ARCH_6M__
  #define __ARM_ARCH_6M__       0
#endif
#ifndef   __ARM_ARCH_7M__
  #define __ARM_ARCH_7M__       0
#endif
#ifndef   __ARM_ARCH_7EM__
  #define __ARM_ARCH_7EM__      0
#endif
#ifndef   __ARM_ARCH_8M_MAIN__
  #define __ARM_ARCH_8M_MAIN__  0
#endif
#ifndef   __ARM_ARCH_7A__
  #define __ARM_ARCH_7A__       0
#endif

#if   ((__ARM_ARCH_7M__      == 1U) || \
       (__ARM_ARCH_7EM__     == 1U) || \
       (__ARM_ARCH_8M_MAIN__ == 1U))
#define IS_IRQ_MASKED()           ((__get_PRIMASK() != 0U) || ((KernelState == osKernelRunning) && (__get_BASEPRI() != 0U)))
#elif  (__ARM_ARCH_6M__      == 1U)
#define IS_IRQ_MASKED()           ((__get_PRIMASK() != 0U) && (KernelState == osKernelRunning))
#elif  (__ARM_ARCH_7A__      == 1U)
#define IS_IRQ_MASKED()           (0U)
#else
#define IS_IRQ_MASKED()           (__get_PRIMASK() != 0U)
#endif

#if (__ARM_ARCH_7A__ == 1U)
/* CPSR mode bitmasks */
#define CPSR_MODE_USER            0x10U
#define CPSR_MODE_SYSTEM          0x1FU

#define IS_IRQ_MODE()             ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
#else
#define IS_IRQ_MODE()             (__get_IPSR() != 0U)
#endif

#define IS_IRQ()                  (IS_IRQ_MODE() || IS_IRQ_MASKED())
/* Limits */
#define MAX_BITS_TASK_NOTIFY      31U
#define MAX_BITS_EVENT_GROUPS     24U

#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY)  - 1U))
#define EVENT_FLAGS_INVALID_BITS  (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))

/* Kernel version and identification string definition */
#define KERNEL_VERSION            (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
                                   ((uint32_t)tskKERNEL_VERSION_MINOR *    10000UL) | \
                                   ((uint32_t)tskKERNEL_VERSION_BUILD *        1UL))

#define KERNEL_ID                 "FreeRTOS V10.1.1"

/* Timer callback information structure definition */
typedef struct {
  osTimerFunc_t func;
  void         *arg;
} TimerCallback_t;

/* Kernel initialization state */
static osKernelState_t KernelState = osKernelInactive;
/*
  Heap region definition used by heap_5 variant

  Define configAPPLICATION_ALLOCATED_HEAP as nonzero value in FreeRTOSConfig.h if
  heap regions are already defined and vPortDefineHeapRegions is called in application.

  Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using
  definition configHEAP_5_REGIONS as parameter. Overriding configHEAP_5_REGIONS
  is possible by defining it globally or in FreeRTOSConfig.h.
*/
#if defined(RTE_RTOS_FreeRTOS_HEAP_5)
#if (configAPPLICATION_ALLOCATED_HEAP == 0)
  /*
    FreeRTOS heap is not defined by the application.
    Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
    is provided by default. Define configHEAP_5_REGIONS to provide custom
    heap regions.
  */
  #define HEAP_5_REGION_SETUP   1

  #ifndef configHEAP_5_REGIONS
    #define configHEAP_5_REGIONS xHeapRegions

    static uint8_t ucHeap[configTOTAL_HEAP_SIZE];

    static HeapRegion_t xHeapRegions[] = {
      { ucHeap, configTOTAL_HEAP_SIZE },
      { NULL,   0                     }
    };
  #else
    /* Global definition is provided to override default heap array */
    extern HeapRegion_t configHEAP_5_REGIONS[];
  #endif
#else
  /*
    The application already defined the array used for the FreeRTOS heap and
    called vPortDefineHeapRegions to initialize heap.
  */
  #define HEAP_5_REGION_SETUP   0
#endif /* configAPPLICATION_ALLOCATED_HEAP */
#endif /* RTE_RTOS_FreeRTOS_HEAP_5 */
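/*
  Illustrative sketch (not part of the wrapper): an application that manages its
  own heap_5 layout can set configAPPLICATION_ALLOCATED_HEAP to 1 in
  FreeRTOSConfig.h and call vPortDefineHeapRegions itself before
  osKernelInitialize. Region sizes and the second RAM address below are
  placeholders; heap_5 requires regions in ascending address order.

    #define configAPPLICATION_ALLOCATED_HEAP   1     // in FreeRTOSConfig.h

    static uint8_t ucHeap0[16 * 1024];               // first heap region
    static const HeapRegion_t AppHeapRegions[] = {
      { ucHeap0,                sizeof(ucHeap0) },
      { (uint8_t *)0x20010000U, 0x8000U         },   // placeholder second region
      { NULL,                   0U              }    // terminator entry
    };

    void app_heap_init (void) {                      // hypothetical helper
      vPortDefineHeapRegions (AppHeapRegions);       // call before osKernelInitialize
    }
*/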
/* CMSIS SysTick interrupt handler prototype */
extern void SysTick_Handler     (void);
/* FreeRTOS tick timer interrupt handler prototype */
extern void xPortSysTickHandler (void);

/*
  SysTick handler implementation that also clears overflow flag.
*/
void SysTick_Handler (void) {
  /* Clear overflow flag: reading SysTick CTRL clears the COUNTFLAG bit */
  SysTick->CTRL;

  if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
    /* Call tick handler */
    xPortSysTickHandler();
  }
}
/*---------------------------------------------------------------------------*/

osStatus_t osKernelInitialize (void) {
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else {
    if (KernelState == osKernelInactive) {
      EvrFreeRTOSSetup(0U);
      #if defined(RTE_RTOS_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
        vPortDefineHeapRegions (configHEAP_5_REGIONS);
      #endif
      KernelState = osKernelReady;
      stat = osOK;
    } else {
      stat = osError;
    }
  }

  return (stat);
}
osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {

  if (version != NULL) {
    version->api    = KERNEL_VERSION;
    version->kernel = KERNEL_VERSION;
  }

  if ((id_buf != NULL) && (id_size != 0U)) {
    if (id_size > sizeof(KERNEL_ID)) {
      id_size = sizeof(KERNEL_ID);
    }
    memcpy(id_buf, KERNEL_ID, id_size);
  }

  return (osOK);
}

osKernelState_t osKernelGetState (void) {
  osKernelState_t state;

  switch (xTaskGetSchedulerState()) {
    case taskSCHEDULER_RUNNING:
      state = osKernelRunning;
      break;

    case taskSCHEDULER_SUSPENDED:
      state = osKernelLocked;
      break;

    case taskSCHEDULER_NOT_STARTED:
    default:
      if (KernelState == osKernelReady) {
        state = osKernelReady;
      } else {
        state = osKernelInactive;
      }
      break;
  }

  return (state);
}
osStatus_t osKernelStart (void) {
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else {
    if (KernelState == osKernelReady) {
      KernelState = osKernelRunning;
      vTaskStartScheduler();
      stat = osOK;
    } else {
      stat = osError;
    }
  }

  return (stat);
}
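/*
  Illustrative sketch: the usual CMSIS-RTOS2 startup sequence driving the
  functions above. The thread function app_main and its attributes are
  application-defined placeholders, not part of this wrapper.

    static void app_main (void *argument) {
      (void)argument;
      for (;;) {
        osDelay (1000U);                  // periodic work
      }
    }

    int main (void) {
      // system/clock initialization goes here
      osKernelInitialize ();              // KernelState: Inactive -> Ready
      osThreadNew (app_main, NULL, NULL); // create application thread(s)
      osKernelStart ();                   // Ready -> Running, starts the scheduler
      for (;;) {}                         // never reached
    }
*/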
int32_t osKernelLock (void) {
  int32_t lock;

  if (IS_IRQ()) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
        lock = 1;
        break;

      case taskSCHEDULER_RUNNING:
        vTaskSuspendAll();
        lock = 0;
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  return (lock);
}

int32_t osKernelUnlock (void) {
  int32_t lock;

  if (IS_IRQ()) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
        lock = 1;

        if (xTaskResumeAll() != pdTRUE) {
          if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
            lock = (int32_t)osError;
          }
        }
        break;

      case taskSCHEDULER_RUNNING:
        lock = 0;
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  return (lock);
}

int32_t osKernelRestoreLock (int32_t lock) {

  if (IS_IRQ()) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
      case taskSCHEDULER_RUNNING:
        if (lock == 1) {
          vTaskSuspendAll();
        }
        else {
          if (lock != 0) {
            lock = (int32_t)osError;
          }
          else if (xTaskResumeAll() != pdTRUE) {
            if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
              lock = (int32_t)osError;
            }
          }
        }
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  return (lock);
}
uint32_t osKernelGetTickCount (void) {
  TickType_t ticks;

  if (IS_IRQ()) {
    ticks = xTaskGetTickCountFromISR();
  } else {
    ticks = xTaskGetTickCount();
  }

  return ((uint32_t)ticks);
}

uint32_t osKernelGetTickFreq (void) {
  return (configTICK_RATE_HZ);
}

uint32_t osKernelGetSysTimerCount (void) {
  uint32_t primask = __get_PRIMASK();
  TickType_t ticks;
  uint32_t val;

  __disable_irq();

  ticks = xTaskGetTickCount();
  val   = OS_Tick_GetCount();

  if (OS_Tick_GetOverflow() != 0U) {
    /* Tick overflow pending: re-read the count and account for the extra tick */
    val = OS_Tick_GetCount();
    ticks++;
  }
  val += ticks * OS_Tick_GetInterval();

  if (primask == 0U) {
    __enable_irq();
  }

  return (val);
}
uint32_t osKernelGetSysTimerFreq (void) {
  return (configCPU_CLOCK_HZ);
}
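/*
  Worked example (illustrative): the system timer count above advances at
  osKernelGetSysTimerFreq() (configCPU_CLOCK_HZ) increments per second, so an
  elapsed time can be derived as shown. Variable names are placeholders.

    uint32_t t0 = osKernelGetSysTimerCount ();
    // ... code under measurement ...
    uint32_t cycles = osKernelGetSysTimerCount () - t0;
    uint32_t usec   = (uint32_t)(((uint64_t)cycles * 1000000U) / osKernelGetSysTimerFreq ());
*/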
/*---------------------------------------------------------------------------*/

osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
  const char *name;
  uint32_t stack;
  TaskHandle_t hTask;
  UBaseType_t prio;
  int32_t mem;

  hTask = NULL;

  if (!IS_IRQ() && (func != NULL)) {
    stack = configMINIMAL_STACK_SIZE;
    prio  = (UBaseType_t)osPriorityNormal;
    name  = NULL;
    mem   = -1;

    if (attr != NULL) {
      if (attr->name != NULL) {
        name = attr->name;
      }
      if (attr->priority != osPriorityNone) {
        prio = (UBaseType_t)attr->priority;
      }

      if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
        return (NULL);
      }

      if (attr->stack_size > 0U) {
        /* In FreeRTOS stack is not in bytes, but in sizeof(StackType_t) which is 4 on ARM ports. */
        /* Stack size should be therefore 4 byte aligned in order to avoid division caused side effects */
        stack = attr->stack_size / sizeof(StackType_t);
      }

      if ((attr->cb_mem    != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
          (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
        /* Control block and stack memory provided by the application: static object */
        mem = 1;
      }
      else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
        /* Control block and stack memory allocated from the dynamic pool */
        mem = 0;
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
      hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t  *)attr->stack_mem,
                                                                                    (StaticTask_t *)attr->cb_mem);
    }
    else if (mem == 0) {
      if (xTaskCreate ((TaskFunction_t)func, name, (uint16_t)stack, argument, prio, &hTask) != pdPASS) {
        hTask = NULL;
      }
    }
  }

  return ((osThreadId_t)hTask);
}
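/*
  Illustrative sketch: creating a thread with application-provided (static)
  memory through the attribute checks above. Buffer and thread names are
  placeholders. Note that stack_size is given in bytes and divided by
  sizeof(StackType_t) internally, so it should be a multiple of 4 on Arm ports.

    static StaticTask_t worker_cb;                  // control block (cb_mem)
    static uint64_t     worker_stack[512 / 8];      // 512-byte stack, 8-byte aligned

    static const osThreadAttr_t worker_attr = {
      .name       = "worker",
      .cb_mem     = &worker_cb,
      .cb_size    = sizeof(worker_cb),
      .stack_mem  = worker_stack,
      .stack_size = sizeof(worker_stack),
      .priority   = osPriorityAboveNormal
    };

    osThreadId_t tid = osThreadNew (worker_func, NULL, &worker_attr);  // worker_func: app-defined
*/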
const char *osThreadGetName (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  const char *name;

  if (IS_IRQ() || (hTask == NULL)) {
    name = NULL;
  } else {
    name = pcTaskGetName (hTask);
  }

  return (name);
}

osThreadId_t osThreadGetId (void) {
  osThreadId_t id;

  id = (osThreadId_t)xTaskGetCurrentTaskHandle();

  return (id);
}

osThreadState_t osThreadGetState (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osThreadState_t state;

  if (IS_IRQ() || (hTask == NULL)) {
    state = osThreadError;
  }
  else {
    switch (eTaskGetState (hTask)) {
      case eRunning:   state = osThreadRunning;    break;
      case eReady:     state = osThreadReady;      break;
      case eBlocked:
      case eSuspended: state = osThreadBlocked;    break;
      case eDeleted:   state = osThreadTerminated; break;
      default:         state = osThreadError;      break;
    }
  }

  return (state);
}

uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  uint32_t sz;

  if (IS_IRQ() || (hTask == NULL)) {
    sz = 0U;
  } else {
    sz = (uint32_t)uxTaskGetStackHighWaterMark (hTask);
  }

  return (sz);
}

osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskPrioritySet (hTask, (UBaseType_t)priority);
  }

  return (stat);
}

osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osPriority_t prio;

  if (IS_IRQ() || (hTask == NULL)) {
    prio = osPriorityError;
  } else {
    prio = (osPriority_t)uxTaskPriorityGet (hTask);
  }

  return (prio);
}
osStatus_t osThreadYield (void) {
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  } else {
    stat = osOK;
    taskYIELD();
  }

  return (stat);
}

osStatus_t osThreadSuspend (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskSuspend (hTask);
  }

  return (stat);
}

osStatus_t osThreadResume (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskResume (hTask);
  }

  return (stat);
}

__NO_RETURN void osThreadExit (void) {
#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  vTaskDelete (NULL);
#endif
  for (;;);
}
osStatus_t osThreadTerminate (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;
#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  eTaskState tstate;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    tstate = eTaskGetState (hTask);

    if (tstate != eDeleted) {
      stat = osOK;
      vTaskDelete (hTask);
    } else {
      stat = osErrorResource;
    }
  }
#else
  (void)hTask;
  stat = osError;
#endif

  return (stat);
}

uint32_t osThreadGetCount (void) {
  uint32_t count;

  if (IS_IRQ()) {
    count = 0U;
  } else {
    count = uxTaskGetNumberOfTasks();
  }

  return (count);
}

uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
  uint32_t i, count;
  TaskStatus_t *task;

  if (IS_IRQ() || (thread_array == NULL) || (array_items == 0U)) {
    count = 0U;
  }
  else {
    vTaskSuspendAll();

    count = uxTaskGetNumberOfTasks();
    task  = pvPortMalloc (count * sizeof(TaskStatus_t));

    if (task != NULL) {
      count = uxTaskGetSystemState (task, count, NULL);

      for (i = 0U; (i < count) && (i < array_items); i++) {
        thread_array[i] = (osThreadId_t)task[i].xHandle;
      }
      count = i;
    }
    (void)xTaskResumeAll();

    vPortFree (task);
  }

  return (count);
}
uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  uint32_t rflags;
  BaseType_t yield;

  if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    rflags = (uint32_t)osError;

    if (IS_IRQ()) {
      yield = pdFALSE;

      (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
      (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);

      portYIELD_FROM_ISR (yield);
    }
    else {
      (void)xTaskNotify (hTask, flags, eSetBits);
      (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
    }
  }
  /* Return flags after setting */
  return (rflags);
}

uint32_t osThreadFlagsClear (uint32_t flags) {
  TaskHandle_t hTask;
  uint32_t rflags, cflags;

  if (IS_IRQ()) {
    rflags = (uint32_t)osErrorISR;
  }
  else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    hTask = xTaskGetCurrentTaskHandle();

    if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
      rflags = cflags;
      cflags &= ~flags;

      if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
        rflags = (uint32_t)osError;
      }
    }
    else {
      rflags = (uint32_t)osError;
    }
  }

  /* Return flags before clearing */
  return (rflags);
}

uint32_t osThreadFlagsGet (void) {
  TaskHandle_t hTask;
  uint32_t rflags;

  if (IS_IRQ()) {
    rflags = (uint32_t)osErrorISR;
  }
  else {
    hTask = xTaskGetCurrentTaskHandle();

    if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
      rflags = (uint32_t)osError;
    }
  }

  return (rflags);
}
uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
  uint32_t rflags, nval;
  uint32_t clear;
  TickType_t t0, td, tout;
  BaseType_t rval;

  if (IS_IRQ()) {
    rflags = (uint32_t)osErrorISR;
  }
  else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    if ((options & osFlagsNoClear) == osFlagsNoClear) {
      clear = 0U;
    } else {
      clear = flags;
    }

    rflags = 0U;
    tout   = timeout;

    t0 = xTaskGetTickCount();
    do {
      rval = xTaskNotifyWait (0, clear, &nval, tout);

      if (rval == pdPASS) {
        rflags &= flags;
        rflags |= nval;

        if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
          if ((flags & rflags) == flags) {
            break;
          } else {
            if (timeout == 0U) {
              rflags = (uint32_t)osErrorResource;
              break;
            }
          }
        }
        else {
          if ((flags & rflags) != 0) {
            break;
          } else {
            if (timeout == 0U) {
              rflags = (uint32_t)osErrorResource;
              break;
            }
          }
        }

        /* Update remaining timeout */
        td = xTaskGetTickCount() - t0;

        if (td > tout) {
          tout = 0;
        } else {
          tout -= td;
        }
      }
      else {
        if (timeout == 0U) {
          rflags = (uint32_t)osErrorResource;
        } else {
          rflags = (uint32_t)osErrorTimeout;
        }
      }
    } while (rval != pdFAIL);
  }

  /* Return flags before clearing */
  return (rflags);
}
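/*
  Illustrative sketch: thread flags as implemented above map onto FreeRTOS task
  notifications, so at most 31 flag bits (MAX_BITS_TASK_NOTIFY) are available
  per thread. FLAG_RX_DONE and rx_thread_id are placeholders.

    #define FLAG_RX_DONE  0x0001U

    // in an interrupt or another thread:
    osThreadFlagsSet (rx_thread_id, FLAG_RX_DONE);

    // in the owning thread:
    uint32_t f = osThreadFlagsWait (FLAG_RX_DONE, osFlagsWaitAny, osWaitForever);
    if ((f & osFlagsError) == 0U) {
      // FLAG_RX_DONE was set; it is cleared on exit because osFlagsNoClear was not used
    }
*/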
osStatus_t osDelay (uint32_t ticks) {
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else {
    stat = osOK;

    if (ticks != 0U) {
      vTaskDelay (ticks);
    }
  }

  return (stat);
}
osStatus_t osDelayUntil (uint32_t ticks) {
  TickType_t tcnt, delay;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else {
    stat = osOK;
    tcnt = xTaskGetTickCount();

    /* Determine remaining number of ticks to delay */
    delay = (TickType_t)ticks - tcnt;

    /* Check if target tick has not expired (sign bit of the difference is clear) */
    if ((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
      vTaskDelayUntil (&tcnt, delay);
    }
    else {
      /* No delay or already expired */
      stat = osErrorParameter;
    }
  }

  return (stat);
}
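/*
  Illustrative sketch: osDelayUntil expects an absolute tick value, which makes
  it suitable for drift-free periodic work. PERIOD_TICKS and do_periodic_work
  are placeholders.

    #define PERIOD_TICKS  100U

    uint32_t next = osKernelGetTickCount () + PERIOD_TICKS;
    for (;;) {
      do_periodic_work ();               // application-defined
      osDelayUntil (next);               // wake at the absolute tick 'next'
      next += PERIOD_TICKS;
    }
*/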
/*---------------------------------------------------------------------------*/

static void TimerCallback (TimerHandle_t hTimer) {
  TimerCallback_t *callb;

  callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);

  if (callb != NULL) {
    callb->func (callb->arg);
  }
}

osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
  const char *name;
  TimerHandle_t hTimer;
  TimerCallback_t *callb;
  UBaseType_t reload;
  int32_t mem;

  hTimer = NULL;

  if (!IS_IRQ() && (func != NULL)) {
    /* Allocate memory to store callback function and argument */
    callb = pvPortMalloc (sizeof(TimerCallback_t));

    if (callb != NULL) {
      callb->func = func;
      callb->arg  = argument;

      if (type == osTimerOnce) {
        reload = pdFALSE;
      } else {
        reload = pdTRUE;
      }

      mem  = -1;
      name = NULL;

      if (attr != NULL) {
        if (attr->name != NULL) {
          name = attr->name;
        }

        if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
          mem = 1;
        }
        else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
          mem = 0;
        }
      }
      else {
        mem = 0;
      }

      if (mem == 1) {
        hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
      }
      else if (mem == 0) {
        hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
      }
    }
  }

  return ((osTimerId_t)hTimer);
}
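/*
  Illustrative sketch: because the wrapper stores the callback pair in a
  heap-allocated TimerCallback_t, osTimerNew needs the FreeRTOS heap even when
  attr->cb_mem is provided. Names below are placeholders.

    static void blink_cb (void *argument) {
      (void)argument;
      // toggle an LED, kick a watchdog, ...
    }

    osTimerId_t tim = osTimerNew (blink_cb, osTimerPeriodic, NULL, NULL);
    if (tim != NULL) {
      osTimerStart (tim, 500U);          // period of 500 kernel ticks
    }
*/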
const char *osTimerGetName (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  const char *p;

  if (IS_IRQ() || (hTimer == NULL)) {
    p = NULL;
  } else {
    p = pcTimerGetName (hTimer);
  }

  return (p);
}

osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
      stat = osOK;
    } else {
      stat = osErrorResource;
    }
  }

  return (stat);
}

osStatus_t osTimerStop (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (xTimerIsTimerActive (hTimer) == pdFALSE) {
      stat = osErrorResource;
    }
    else {
      if (xTimerStop (hTimer, 0) == pdPASS) {
        stat = osOK;
      } else {
        stat = osError;
      }
    }
  }

  return (stat);
}

uint32_t osTimerIsRunning (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  uint32_t running;

  if (IS_IRQ() || (hTimer == NULL)) {
    running = 0U;
  } else {
    running = (uint32_t)xTimerIsTimerActive (hTimer);
  }

  return (running);
}

osStatus_t osTimerDelete (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;
#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  TimerCallback_t *callb;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
    callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);

    if (xTimerDelete (hTimer, 0) == pdPASS) {
      vPortFree (callb);
      stat = osOK;
    } else {
      stat = osErrorResource;
    }
  }
#else
  (void)hTimer;
  stat = osError;
#endif

  return (stat);
}
/*---------------------------------------------------------------------------*/

osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
  EventGroupHandle_t hEventGroup;
  int32_t mem;

  hEventGroup = NULL;

  if (!IS_IRQ()) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
        mem = 1;
      }
      else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
        mem = 0;
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
      hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
    }
    else if (mem == 0) {
      hEventGroup = xEventGroupCreate();
    }
  }

  return ((osEventFlagsId_t)hEventGroup);
}

uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;
  BaseType_t yield;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IS_IRQ()) {
    yield = pdFALSE;

    if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
      rflags = (uint32_t)osErrorResource;
    } else {
      rflags = flags;
      portYIELD_FROM_ISR (yield);
    }
  }
  else {
    rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
  }

  return (rflags);
}

uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IS_IRQ()) {
    rflags = xEventGroupGetBitsFromISR (hEventGroup);

    if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
      rflags = (uint32_t)osErrorResource;
    }
  }
  else {
    rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
  }

  return (rflags);
}

uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;

  if (ef_id == NULL) {
    rflags = 0U;
  }
  else if (IS_IRQ()) {
    rflags = xEventGroupGetBitsFromISR (hEventGroup);
  }
  else {
    rflags = xEventGroupGetBits (hEventGroup);
  }

  return (rflags);
}
uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  BaseType_t wait_all;
  BaseType_t exit_clr;
  uint32_t rflags;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IS_IRQ()) {
    rflags = (uint32_t)osErrorISR;
  }
  else {
    if (options & osFlagsWaitAll) {
      wait_all = pdTRUE;
    } else {
      wait_all = pdFALSE;
    }

    if (options & osFlagsNoClear) {
      exit_clr = pdFALSE;
    } else {
      exit_clr = pdTRUE;
    }

    rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);

    if (options & osFlagsWaitAll) {
      if (flags != rflags) {
        if (timeout > 0U) {
          rflags = (uint32_t)osErrorTimeout;
        } else {
          rflags = (uint32_t)osErrorResource;
        }
      }
    }
    else {
      if ((flags & rflags) == 0U) {
        if (timeout > 0U) {
          rflags = (uint32_t)osErrorTimeout;
        } else {
          rflags = (uint32_t)osErrorResource;
        }
      }
    }
  }

  return (rflags);
}
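/*
  Illustrative sketch: event flags map onto a FreeRTOS event group, so only the
  lower 24 bits (MAX_BITS_EVENT_GROUPS) are usable. EVT_READY / EVT_DONE and
  the id variable are placeholders.

    #define EVT_READY  0x01U
    #define EVT_DONE   0x02U

    osEventFlagsId_t evt = osEventFlagsNew (NULL);

    // producer side:
    osEventFlagsSet (evt, EVT_READY);

    // consumer side: wait until both flags are set, clearing them on exit
    uint32_t f = osEventFlagsWait (evt, EVT_READY | EVT_DONE, osFlagsWaitAll, osWaitForever);
    if ((f & osFlagsError) == 0U) {
      // both flags were observed
    }
*/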
osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  osStatus_t stat;

#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hEventGroup == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vEventGroupDelete (hEventGroup);
  }
#else
  (void)hEventGroup;
  stat = osError;
#endif

  return (stat);
}

/*---------------------------------------------------------------------------*/

osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
  SemaphoreHandle_t hMutex;
  uint32_t type;
  uint32_t rmtx;
  int32_t mem;
  #if (configQUEUE_REGISTRY_SIZE > 0)
  const char *name;
  #endif

  hMutex = NULL;

  if (!IS_IRQ()) {
    if (attr != NULL) {
      type = attr->attr_bits;
    } else {
      type = 0U;
    }

    if ((type & osMutexRecursive) == osMutexRecursive) {
      rmtx = 1U;
    } else {
      rmtx = 0U;
    }

    if ((type & osMutexRobust) != osMutexRobust) {
      mem = -1;

      if (attr != NULL) {
        if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
          mem = 1;
        }
        else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
          mem = 0;
        }
      }
      else {
        mem = 0;
      }

      if (mem == 1) {
        if (rmtx != 0U) {
          hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
        } else {
          hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
        }
      }
      else if (mem == 0) {
        if (rmtx != 0U) {
          hMutex = xSemaphoreCreateRecursiveMutex ();
        } else {
          hMutex = xSemaphoreCreateMutex ();
        }
      }

      #if (configQUEUE_REGISTRY_SIZE > 0)
      if (hMutex != NULL) {
        if (attr != NULL) {
          name = attr->name;
        } else {
          name = NULL;
        }
        vQueueAddToRegistry (hMutex, name);
      }
      #endif

      if ((hMutex != NULL) && (rmtx != 0U)) {
        /* Tag a recursive mutex by setting the least significant bit of the handle */
        hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
      }
    }
  }

  return ((osMutexId_t)hMutex);
}
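/*
  Illustrative sketch: the returned osMutexId_t is the FreeRTOS handle with its
  least significant bit used to remember that the mutex is recursive (handles
  are at least word aligned, so the bit is otherwise unused). The attribute
  struct below is a placeholder; osMutexRobust is not supported by this wrapper.

    static const osMutexAttr_t log_mutex_attr = {
      .name      = "log_mutex",
      .attr_bits = osMutexRecursive | osMutexPrioInherit
    };

    osMutexId_t log_mutex = osMutexNew (&log_mutex_attr);

    osMutexAcquire (log_mutex, osWaitForever);   // may be taken again by the same thread
    osMutexRelease (log_mutex);
*/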
osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
  SemaphoreHandle_t hMutex;
  osStatus_t stat;
  uint32_t rmtx;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  /* Extract recursive mutex flag */
  rmtx = (uint32_t)mutex_id & 1U;

  stat = osOK;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hMutex == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (rmtx != 0U) {
      if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
    else {
      if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
  }

  return (stat);
}

osStatus_t osMutexRelease (osMutexId_t mutex_id) {
  SemaphoreHandle_t hMutex;
  osStatus_t stat;
  uint32_t rmtx;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  /* Extract recursive mutex flag */
  rmtx = (uint32_t)mutex_id & 1U;

  stat = osOK;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hMutex == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (rmtx != 0U) {
      if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
        stat = osErrorResource;
      }
    }
    else {
      if (xSemaphoreGive (hMutex) != pdPASS) {
        stat = osErrorResource;
      }
    }
  }

  return (stat);
}

osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
  SemaphoreHandle_t hMutex;
  osThreadId_t owner;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  if (IS_IRQ() || (hMutex == NULL)) {
    owner = NULL;
  } else {
    owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
  }

  return (owner);
}

osStatus_t osMutexDelete (osMutexId_t mutex_id) {
  osStatus_t stat;
#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  SemaphoreHandle_t hMutex;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hMutex == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hMutex);
    #endif

    stat = osOK;
    vSemaphoreDelete (hMutex);
  }
#else
  (void)mutex_id;
  stat = osError;
#endif

  return (stat);
}
/*---------------------------------------------------------------------------*/

osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
  SemaphoreHandle_t hSemaphore;
  int32_t mem;
  #if (configQUEUE_REGISTRY_SIZE > 0)
  const char *name;
  #endif

  hSemaphore = NULL;

  if (!IS_IRQ() && (max_count > 0U) && (initial_count <= max_count)) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
        mem = 1;
      }
      else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
        mem = 0;
      }
    }
    else {
      mem = 0;
    }

    if (mem != -1) {
      if (max_count == 1U) {
        if (mem == 1) {
          hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
        } else {
          hSemaphore = xSemaphoreCreateBinary();
        }

        if ((hSemaphore != NULL) && (initial_count != 0U)) {
          if (xSemaphoreGive (hSemaphore) != pdPASS) {
            vSemaphoreDelete (hSemaphore);
            hSemaphore = NULL;
          }
        }
      }
      else {
        if (mem == 1) {
          hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
        } else {
          hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
        }
      }

      #if (configQUEUE_REGISTRY_SIZE > 0)
      if (hSemaphore != NULL) {
        if (attr != NULL) {
          name = attr->name;
        } else {
          name = NULL;
        }
        vQueueAddToRegistry (hSemaphore, name);
      }
      #endif
    }
  }

  return ((osSemaphoreId_t)hSemaphore);
}
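/*
  Illustrative sketch: max_count selects between a binary semaphore (1) and a
  counting semaphore (>1) in the code above. Names are placeholders.

    osSemaphoreId_t sem_irq  = osSemaphoreNew (1U, 0U, NULL);   // binary, initially empty
    osSemaphoreId_t sem_pool = osSemaphoreNew (4U, 4U, NULL);   // counting, 4 tokens

    // ISR signals the waiting thread:
    osSemaphoreRelease (sem_irq);

    // thread side (inside an ISR the acquire timeout must be 0):
    if (osSemaphoreAcquire (sem_irq, osWaitForever) == osOK) {
      // event received
    }
*/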
osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;
  BaseType_t yield;

  stat = osOK;

  if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else if (IS_IRQ()) {
    if (timeout != 0U) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
      if (timeout != 0U) {
        stat = osErrorTimeout;
      } else {
        stat = osErrorResource;
      }
    }
  }

  return (stat);
}

osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;
  BaseType_t yield;

  stat = osOK;

  if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else if (IS_IRQ()) {
    yield = pdFALSE;

    if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
      stat = osErrorResource;
    } else {
      portYIELD_FROM_ISR (yield);
    }
  }
  else {
    if (xSemaphoreGive (hSemaphore) != pdPASS) {
      stat = osErrorResource;
    }
  }

  return (stat);
}

uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  uint32_t count;

  if (hSemaphore == NULL) {
    count = 0U;
  }
  else if (IS_IRQ()) {
    count = uxQueueMessagesWaitingFromISR (hSemaphore);
  }
  else {
    count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
  }

  return (count);
}

osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;

#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hSemaphore);
    #endif

    stat = osOK;
    vSemaphoreDelete (hSemaphore);
  }
#else
  (void)hSemaphore;
  stat = osError;
#endif

  return (stat);
}
/*---------------------------------------------------------------------------*/

osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
  QueueHandle_t hQueue;
  int32_t mem;
  #if (configQUEUE_REGISTRY_SIZE > 0)
  const char *name;
  #endif

  hQueue = NULL;

  if (!IS_IRQ() && (msg_count > 0U) && (msg_size > 0U)) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
          (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
        mem = 1;
      }
      else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
               (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
        mem = 0;
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
      hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
    }
    else if (mem == 0) {
      hQueue = xQueueCreate (msg_count, msg_size);
    }

    #if (configQUEUE_REGISTRY_SIZE > 0)
    if (hQueue != NULL) {
      if (attr != NULL) {
        name = attr->name;
      } else {
        name = NULL;
      }
      vQueueAddToRegistry (hQueue, name);
    }
    #endif
  }

  return ((osMessageQueueId_t)hQueue);
}
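/*
  Illustrative sketch: a small queue of fixed-size messages using the functions
  above; msg_prio is ignored by this implementation. The message type and queue
  size are placeholders.

    typedef struct { uint8_t cmd; uint8_t data[7]; } app_msg_t;

    osMessageQueueId_t mq = osMessageQueueNew (16U, sizeof(app_msg_t), NULL);

    app_msg_t tx = { .cmd = 1U };
    osMessageQueuePut (mq, &tx, 0U, 0U);             // non-blocking put (timeout must be 0 in ISRs)

    app_msg_t rx;
    if (osMessageQueueGet (mq, &rx, NULL, osWaitForever) == osOK) {
      // process rx
    }
*/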
osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;
  BaseType_t yield;

  (void)msg_prio; /* Message priority is ignored */

  stat = osOK;

  if (IS_IRQ()) {
    if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if ((hQueue == NULL) || (msg_ptr == NULL)) {
      stat = osErrorParameter;
    }
    else {
      if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
  }

  return (stat);
}

osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;
  BaseType_t yield;

  (void)msg_prio; /* Message priority is ignored */

  stat = osOK;

  if (IS_IRQ()) {
    if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if ((hQueue == NULL) || (msg_ptr == NULL)) {
      stat = osErrorParameter;
    }
    else {
      if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
  }

  return (stat);
}
uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t capacity;

  if (mq == NULL) {
    capacity = 0U;
  } else {
    /* capacity = pxQueue->uxLength */
    capacity = mq->uxDummy4[1];
  }

  return (capacity);
}

uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t size;

  if (mq == NULL) {
    size = 0U;
  } else {
    /* size = pxQueue->uxItemSize */
    size = mq->uxDummy4[2];
  }

  return (size);
}

uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  UBaseType_t count;

  if (hQueue == NULL) {
    count = 0U;
  }
  else if (IS_IRQ()) {
    count = uxQueueMessagesWaitingFromISR (hQueue);
  }
  else {
    count = uxQueueMessagesWaiting (hQueue);
  }

  return ((uint32_t)count);
}

uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t space;
  uint32_t isrm;

  if (mq == NULL) {
    space = 0U;
  }
  else if (IS_IRQ()) {
    isrm = taskENTER_CRITICAL_FROM_ISR();

    /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
    space = mq->uxDummy4[1] - mq->uxDummy4[0];

    taskEXIT_CRITICAL_FROM_ISR(isrm);
  }
  else {
    space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
  }

  return (space);
}

osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;

  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hQueue == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    (void)xQueueReset (hQueue);
  }

  return (stat);
}

osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;

#ifndef RTE_RTOS_FreeRTOS_HEAP_1
  if (IS_IRQ()) {
    stat = osErrorISR;
  }
  else if (hQueue == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hQueue);
    #endif

    stat = osOK;
    vQueueDelete (hQueue);
  }
#else
  (void)hQueue;
  stat = osError;
#endif

  return (stat);
}
/*---------------------------------------------------------------------------*/

/* Callback function prototypes */
extern void vApplicationIdleHook (void);
extern void vApplicationTickHook (void);
extern void vApplicationMallocFailedHook (void);
extern void vApplicationDaemonTaskStartupHook (void);
extern void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName);

/*
  Dummy implementation of the callback function vApplicationIdleHook().
*/
#if (configUSE_IDLE_HOOK == 1)
__WEAK void vApplicationIdleHook (void){}
#endif

/*
  Dummy implementation of the callback function vApplicationTickHook().
*/
#if (configUSE_TICK_HOOK == 1)
__WEAK void vApplicationTickHook (void){}
#endif

/*
  Dummy implementation of the callback function vApplicationMallocFailedHook().
*/
#if (configUSE_MALLOC_FAILED_HOOK == 1)
__WEAK void vApplicationMallocFailedHook (void){}
#endif

/*
  Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
*/
#if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
__WEAK void vApplicationDaemonTaskStartupHook (void){}
#endif

/*
  Dummy implementation of the callback function vApplicationStackOverflowHook().
*/
#if (configCHECK_FOR_STACK_OVERFLOW > 0)
__WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName) {
  (void)xTask;
  (void)pcTaskName;

  configASSERT(0);
}
#endif
/*---------------------------------------------------------------------------*/

#if (configSUPPORT_STATIC_ALLOCATION == 1)
/* External Idle and Timer task static memory allocation functions */
extern void vApplicationGetIdleTaskMemory  (StaticTask_t **ppxIdleTaskTCBBuffer,  StackType_t **ppxIdleTaskStackBuffer,  uint32_t *pulIdleTaskStackSize);
extern void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize);

/* Idle task control block and stack */
static StaticTask_t Idle_TCB;
static StackType_t  Idle_Stack[configMINIMAL_STACK_SIZE];

/* Timer task control block and stack */
static StaticTask_t Timer_TCB;
static StackType_t  Timer_Stack[configTIMER_TASK_STACK_DEPTH];

/*
  vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
  equals to 1 and is required for static memory allocation support.
*/
void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
  *ppxIdleTaskTCBBuffer   = &Idle_TCB;
  *ppxIdleTaskStackBuffer = &Idle_Stack[0];
  *pulIdleTaskStackSize   = (uint32_t)configMINIMAL_STACK_SIZE;
}

/*
  vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
  equals to 1 and is required for static memory allocation support.
*/
void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
  *ppxTimerTaskTCBBuffer   = &Timer_TCB;
  *ppxTimerTaskStackBuffer = &Timer_Stack[0];
  *pulTimerTaskStackSize   = (uint32_t)configTIMER_TASK_STACK_DEPTH;
}
#endif /* (configSUPPORT_STATIC_ALLOCATION == 1) */
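/*
  Illustrative sketch: the two callbacks above satisfy FreeRTOS when static
  allocation is enabled. A matching FreeRTOSConfig.h fragment could look like
  this; the stack depth values are placeholders given in StackType_t words.

    #define configSUPPORT_STATIC_ALLOCATION   1
    #define configMINIMAL_STACK_SIZE          ((uint16_t)128)
    #define configTIMER_TASK_STACK_DEPTH      256
*/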