1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
33 #include "timers.h" // ARM.FreeRTOS::RTOS:Timers
35 #include "freertos_mpool.h" // osMemoryPool definitions
36 #include "freertos_os2.h" // Configuration check and setup
38 /*---------------------------------------------------------------------------*/
39 #ifndef __ARM_ARCH_6M__
40 #define __ARM_ARCH_6M__ 0
42 #ifndef __ARM_ARCH_7M__
43 #define __ARM_ARCH_7M__ 0
45 #ifndef __ARM_ARCH_7EM__
46 #define __ARM_ARCH_7EM__ 0
48 #ifndef __ARM_ARCH_8M_MAIN__
49 #define __ARM_ARCH_8M_MAIN__ 0
51 #ifndef __ARM_ARCH_7A__
52 #define __ARM_ARCH_7A__ 0
55 #if ((__ARM_ARCH_7M__ == 1U) || \
56 (__ARM_ARCH_7EM__ == 1U) || \
57 (__ARM_ARCH_8M_MAIN__ == 1U))
58 #define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
59 #elif (__ARM_ARCH_6M__ == 1U)
60 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
61 #elif (__ARM_ARCH_7A__ == 1U)
63 #define CPSR_MASKBIT_I 0x80U
65 #define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
67 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
70 #if (__ARM_ARCH_7A__ == 1U)
71 /* CPSR mode bitmasks */
72 #define CPSR_MODE_USER 0x10U
73 #define CPSR_MODE_SYSTEM 0x1FU
75 #define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
77 #define IS_IRQ_MODE() (__get_IPSR() != 0U)
81 #define MAX_BITS_TASK_NOTIFY 31U
82 #define MAX_BITS_EVENT_GROUPS 24U
84 #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
85 #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
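/* For reference, with the limits above these masks evaluate to
   (assuming a 32-bit unsigned long, as on Arm ports):
     THREAD_FLAGS_INVALID_BITS = ~((1UL << 31) - 1U) = 0x80000000U
     EVENT_FLAGS_INVALID_BITS  = ~((1UL << 24) - 1U) = 0xFF000000U
   i.e. only thread flag bits 0..30 and event flag bits 0..23 may be used. */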
87 /* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
88 #define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
89 ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
90 ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
92 #define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
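/* Worked example of the encoding above: FreeRTOS V10.4.6 yields
   (10 * 10000000UL) + (4 * 10000UL) + 6 = 100040006 ("10" "004" "0006"). */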
94 /* Timer callback information structure definition */
100 /* Kernel initialization state */
101 static osKernelState_t KernelState = osKernelInactive;
104 Heap region definition used by heap_5 variant
106 Define configAPPLICATION_ALLOCATED_HEAP as a nonzero value in FreeRTOSConfig.h if
107 heap regions are already defined and vPortDefineHeapRegions is called by the application.
109 Otherwise vPortDefineHeapRegions will be called by osKernelInitialize with the
110 definition configHEAP_5_REGIONS as its parameter. configHEAP_5_REGIONS may be
111 overridden by defining it globally or in FreeRTOSConfig.h.
113 #if defined(USE_FreeRTOS_HEAP_5)
114 #if (configAPPLICATION_ALLOCATED_HEAP == 0)
116 FreeRTOS heap is not defined by the application.
117 Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
118 is provided by default. Define configHEAP_5_REGIONS to provide custom heap regions.
121 #define HEAP_5_REGION_SETUP 1
123 #ifndef configHEAP_5_REGIONS
124 #define configHEAP_5_REGIONS xHeapRegions
126 static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
128 static HeapRegion_t xHeapRegions[] = {
129 { ucHeap, configTOTAL_HEAP_SIZE },
133 /* Global definition is provided to override default heap array */
134 extern HeapRegion_t configHEAP_5_REGIONS[];
138 The application already defined the array used for the FreeRTOS heap and
139 called vPortDefineHeapRegions to initialize heap.
141 #define HEAP_5_REGION_SETUP 0
142 #endif /* configAPPLICATION_ALLOCATED_HEAP */
143 #endif /* USE_FreeRTOS_HEAP_5 */
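/*
  Sketch of overriding the default heap_5 setup described above. The buffer and
  array names below are hypothetical; vPortDefineHeapRegions requires the regions
  to be listed in ascending start-address order and terminated by a NULL entry.
*/
#if 0
/* In FreeRTOSConfig.h: #define configHEAP_5_REGIONS  AppHeapRegions */
static uint8_t heap_low [16 * 1024];
static uint8_t heap_high[32 * 1024];

HeapRegion_t AppHeapRegions[] = {
  { heap_low,  sizeof(heap_low)  },
  { heap_high, sizeof(heap_high) },
  { NULL,      0                 }     /* terminating entry */
};
#endif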
146 #undef SysTick_Handler
148 /* CMSIS SysTick interrupt handler prototype */
149 extern void SysTick_Handler (void);
150 /* FreeRTOS tick timer interrupt handler prototype */
151 extern void xPortSysTickHandler (void);
154 SysTick handler implementation that also clears overflow flag.
156 void SysTick_Handler (void) {
157 #if (configUSE_TICKLESS_IDLE == 0)
158 /* Clear overflow flag */
162 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
163 /* Call tick handler */
164 xPortSysTickHandler();
170 Setup SVC to reset value.
172 __STATIC_INLINE void SVC_Setup (void) {
173 #if (__ARM_ARCH_7A__ == 0U)
174 /* The Service Call interrupt might be configured before the kernel starts; */
175 /* if its priority is lower than or equal to BASEPRI, the SVC instruction   */
176 /* causes a Hard Fault. */
177 NVIC_SetPriority (SVCall_IRQn, 0U);
182 Function macro used to retrieve semaphore count from ISR
184 #ifndef uxSemaphoreGetCountFromISR
185 #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
189 Determine if CPU executes from interrupt context or if interrupts are masked.
191 __STATIC_INLINE uint32_t IRQ_Context (void) {
198 /* Called from interrupt context */
202 /* Get FreeRTOS scheduler state */
203 state = xTaskGetSchedulerState();
205 if (state != taskSCHEDULER_NOT_STARTED) {
206 /* Scheduler was started */
207 if (IS_IRQ_MASKED()) {
208 /* Interrupts are masked */
214 /* Return context, 0: thread context, 1: IRQ context */
219 /* ==== Kernel Management Functions ==== */
222 Initialize the RTOS Kernel.
224 osStatus_t osKernelInitialize (void) {
228 if (IRQ_Context() != 0U) {
232 state = xTaskGetSchedulerState();
234 /* Initialize if scheduler not started and not initialized before */
235 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelInactive)) {
236 #if defined(USE_TRACE_EVENT_RECORDER)
237 /* Initialize the trace macro debugging output channel */
238 EvrFreeRTOSSetup(0U);
240 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
241 /* Initialize the memory regions when using heap_5 variant */
242 vPortDefineHeapRegions (configHEAP_5_REGIONS);
244 KernelState = osKernelReady;
251 /* Return execution status */
256 Get RTOS Kernel Information.
258 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
260 if (version != NULL) {
261 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
262 version->api = KERNEL_VERSION;
263 version->kernel = KERNEL_VERSION;
266 if ((id_buf != NULL) && (id_size != 0U)) {
267 /* Buffer for retrieving identification string is provided */
268 if (id_size > sizeof(KERNEL_ID)) {
269 id_size = sizeof(KERNEL_ID);
271 /* Copy kernel identification string into provided buffer */
272 memcpy(id_buf, KERNEL_ID, id_size);
275 /* Return execution status */
280 Get the current RTOS Kernel state.
282 osKernelState_t osKernelGetState (void) {
283 osKernelState_t state;
285 switch (xTaskGetSchedulerState()) {
286 case taskSCHEDULER_RUNNING:
287 state = osKernelRunning;
290 case taskSCHEDULER_SUSPENDED:
291 state = osKernelLocked;
294 case taskSCHEDULER_NOT_STARTED:
296 if (KernelState == osKernelReady) {
297 /* Ready, osKernelInitialize was already called */
298 state = osKernelReady;
300 /* Not initialized */
301 state = osKernelInactive;
306 /* Return current state */
311 Start the RTOS Kernel scheduler.
313 osStatus_t osKernelStart (void) {
317 if (IRQ_Context() != 0U) {
321 state = xTaskGetSchedulerState();
323 /* Start scheduler if initialized and not started before */
324 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelReady)) {
325 /* Ensure SVC priority is at the reset value */
327 /* Change state to ensure correct API flow */
328 KernelState = osKernelRunning;
329 /* Start the kernel scheduler */
330 vTaskStartScheduler();
337 /* Return execution status */
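/*
  Usage sketch (not compiled into this wrapper): the typical startup sequence
  served by osKernelInitialize and osKernelStart above. The thread function
  app_main is a hypothetical application thread.
*/
#if 0
static void app_main (void *argument) {
  (void)argument;
  for (;;) {
    osDelay (1000U);                      /* block this thread for 1000 kernel ticks */
  }
}

int main (void) {
  osKernelInitialize ();                  /* KernelState: osKernelInactive -> osKernelReady */
  osThreadNew (app_main, NULL, NULL);     /* create application thread(s) */
  osKernelStart ();                       /* osKernelReady -> osKernelRunning, does not return on success */
  for (;;) {}                             /* only reached if the scheduler failed to start */
}
#endif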
342 Lock the RTOS Kernel scheduler.
344 int32_t osKernelLock (void) {
347 if (IRQ_Context() != 0U) {
348 lock = (int32_t)osErrorISR;
351 switch (xTaskGetSchedulerState()) {
352 case taskSCHEDULER_SUSPENDED:
356 case taskSCHEDULER_RUNNING:
361 case taskSCHEDULER_NOT_STARTED:
363 lock = (int32_t)osError;
368 /* Return previous lock state */
373 Unlock the RTOS Kernel scheduler.
375 int32_t osKernelUnlock (void) {
378 if (IRQ_Context() != 0U) {
379 lock = (int32_t)osErrorISR;
382 switch (xTaskGetSchedulerState()) {
383 case taskSCHEDULER_SUSPENDED:
386 if (xTaskResumeAll() != pdTRUE) {
387 if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
388 lock = (int32_t)osError;
393 case taskSCHEDULER_RUNNING:
397 case taskSCHEDULER_NOT_STARTED:
399 lock = (int32_t)osError;
404 /* Return previous lock state */
409 Restore the RTOS Kernel scheduler lock state.
411 int32_t osKernelRestoreLock (int32_t lock) {
413 if (IRQ_Context() != 0U) {
414 lock = (int32_t)osErrorISR;
417 switch (xTaskGetSchedulerState()) {
418 case taskSCHEDULER_SUSPENDED:
419 case taskSCHEDULER_RUNNING:
425 lock = (int32_t)osError;
428 if (xTaskResumeAll() != pdTRUE) {
429 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
430 lock = (int32_t)osError;
437 case taskSCHEDULER_NOT_STARTED:
439 lock = (int32_t)osError;
444 /* Return new lock state */
449 Get the RTOS kernel tick count.
451 uint32_t osKernelGetTickCount (void) {
454 if (IRQ_Context() != 0U) {
455 ticks = xTaskGetTickCountFromISR();
457 ticks = xTaskGetTickCount();
460 /* Return kernel tick count */
465 Get the RTOS kernel tick frequency.
467 uint32_t osKernelGetTickFreq (void) {
468 /* Return frequency in hertz */
469 return (configTICK_RATE_HZ);
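/*
  Worked example for the tick frequency above: with the common configTICK_RATE_HZ
  of 1000, a 250 ms interval corresponds to 250 * 1000 / 1000 = 250 kernel ticks.
  The helper below is a sketch, not part of this wrapper.
*/
#if 0
static uint32_t ms_to_ticks (uint32_t ms) {
  /* round down; assumes ms * tick frequency fits in 32 bits */
  return ((ms * osKernelGetTickFreq ()) / 1000U);
}
#endif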
473 Get the RTOS kernel system timer count.
475 uint32_t osKernelGetSysTimerCount (void) {
476 uint32_t irqmask = IS_IRQ_MASKED();
479 #if (configUSE_TICKLESS_IDLE != 0)
482 /* Low Power Tickless Idle controls timer overflow flag and therefore */
483 /* OS_Tick_GetOverflow may be non-functional. As a workaround a reference */
484 /* time is measured here before disabling interrupts. Timer value overflow */
485 /* is then checked by comparing reference against latest time measurement. */
486 /* Timer count value returned by this method is less accurate, but without  */
487 /* this workaround a missed overflow would return an invalid timer count.   */
488 val0 = OS_Tick_GetCount();
495 ticks = xTaskGetTickCount();
496 val = OS_Tick_GetCount();
498 /* Update tick count and timer value when timer overflows */
499 #if (configUSE_TICKLESS_IDLE != 0)
504 if (OS_Tick_GetOverflow() != 0U) {
505 val = OS_Tick_GetCount();
510 val += ticks * OS_Tick_GetInterval();
516 /* Return system timer count */
521 Get the RTOS kernel system timer frequency.
523 uint32_t osKernelGetSysTimerFreq (void) {
524 /* Return frequency in hertz */
525 return (configCPU_CLOCK_HZ);
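/*
  Sketch of measuring a short duration with the two system timer functions above.
  The helper name measure_us is hypothetical; the subtraction relies on unsigned
  wrap-around of the 32-bit timer count.
*/
#if 0
static uint32_t measure_us (void (*work)(void)) {
  uint32_t start, cycles;

  start = osKernelGetSysTimerCount ();
  work ();                                               /* code under measurement */
  cycles = osKernelGetSysTimerCount () - start;

  return ((uint32_t)(((uint64_t)cycles * 1000000U) / osKernelGetSysTimerFreq ()));
}
#endif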
529 /* ==== Thread Management Functions ==== */
532 Create a thread and add it to Active Threads.
535 - The memory for control block and stack must be provided in the osThreadAttr_t
536 structure in order to allocate the object statically.
537 - Attribute osThreadJoinable is not supported, NULL is returned if used.
539 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
548 if ((IRQ_Context() == 0U) && (func != NULL)) {
549 stack = configMINIMAL_STACK_SIZE;
550 prio = (UBaseType_t)osPriorityNormal;
556 if (attr->name != NULL) {
559 if (attr->priority != osPriorityNone) {
560 prio = (UBaseType_t)attr->priority;
563 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
564 /* Invalid priority or unsupported osThreadJoinable attribute used */
568 if (attr->stack_size > 0U) {
569 /* In FreeRTOS the stack is specified not in bytes but in units of sizeof(StackType_t), which is 4 on Arm ports. */
570 /* The stack size should therefore be 4-byte aligned to avoid side effects of the division below. */
571 stack = attr->stack_size / sizeof(StackType_t);
574 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
575 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
576 /* The memory for control block and stack is provided, use static object */
580 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
581 /* Control block and stack memory will be allocated from the dynamic pool */
591 #if (configSUPPORT_STATIC_ALLOCATION == 1)
592 hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem,
593 (StaticTask_t *)attr->cb_mem);
598 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
599 if (xTaskCreate ((TaskFunction_t)func, name, (configSTACK_DEPTH_TYPE)stack, argument, prio, &hTask) != pdPASS) {
607 /* Return thread ID */
608 return ((osThreadId_t)hTask);
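/*
  Sketch of creating a thread with a statically allocated control block and stack,
  matching the attribute checks above. The names worker, worker_cb and worker_stack
  are hypothetical.
*/
#if 0
static void worker (void *argument) {
  (void)argument;
  for (;;) {
    osDelay (100U);
  }
}

static StaticTask_t worker_cb;
static StackType_t  worker_stack[256];             /* 1 KiB when StackType_t is 4 bytes */

static const osThreadAttr_t worker_attr = {
  .name       = "worker",
  .cb_mem     = &worker_cb,
  .cb_size    = sizeof(worker_cb),
  .stack_mem  = worker_stack,
  .stack_size = sizeof(worker_stack),              /* in bytes, converted to StackType_t units above */
  .priority   = osPriorityNormal
};

static void create_worker (void) {
  osThreadId_t id = osThreadNew (worker, NULL, &worker_attr);
  (void)id;                                        /* NULL on allocation or attribute error */
}
#endif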
612 Get name of a thread.
614 const char *osThreadGetName (osThreadId_t thread_id) {
615 TaskHandle_t hTask = (TaskHandle_t)thread_id;
618 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
621 name = pcTaskGetName (hTask);
624 /* Return name as null-terminated string */
629 Return the thread ID of the current running thread.
631 osThreadId_t osThreadGetId (void) {
634 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
636 /* Return thread ID */
641 Get current thread state of a thread.
643 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
644 TaskHandle_t hTask = (TaskHandle_t)thread_id;
645 osThreadState_t state;
647 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
648 state = osThreadError;
651 switch (eTaskGetState (hTask)) {
652 case eRunning: state = osThreadRunning; break;
653 case eReady: state = osThreadReady; break;
655 case eSuspended: state = osThreadBlocked; break;
658 default: state = osThreadError; break;
662 /* Return current thread state */
667 Get available stack space of a thread based on stack watermark recording during execution.
669 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
670 TaskHandle_t hTask = (TaskHandle_t)thread_id;
673 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
676 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
679 /* Return remaining stack space in bytes */
684 Change priority of a thread.
686 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
687 TaskHandle_t hTask = (TaskHandle_t)thread_id;
690 if (IRQ_Context() != 0U) {
693 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
694 stat = osErrorParameter;
698 vTaskPrioritySet (hTask, (UBaseType_t)priority);
701 /* Return execution status */
706 Get current priority of a thread.
708 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
709 TaskHandle_t hTask = (TaskHandle_t)thread_id;
712 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
713 prio = osPriorityError;
715 prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
718 /* Return current thread priority */
723 Pass control to next thread that is in state READY.
725 osStatus_t osThreadYield (void) {
728 if (IRQ_Context() != 0U) {
735 /* Return execution status */
739 #if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
741 Suspend execution of a thread.
743 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
744 TaskHandle_t hTask = (TaskHandle_t)thread_id;
747 if (IRQ_Context() != 0U) {
750 else if (hTask == NULL) {
751 stat = osErrorParameter;
755 vTaskSuspend (hTask);
758 /* Return execution status */
763 Resume execution of a thread.
765 osStatus_t osThreadResume (osThreadId_t thread_id) {
766 TaskHandle_t hTask = (TaskHandle_t)thread_id;
770 if (IRQ_Context() != 0U) {
773 else if (hTask == NULL) {
774 stat = osErrorParameter;
777 tstate = eTaskGetState (hTask);
779 if (tstate == eSuspended) {
780 /* Thread is suspended */
784 /* Not suspended, might be blocked */
785 if (xTaskAbortDelay(hTask) == pdPASS) {
786 /* Thread was unblocked */
789 /* Thread was not blocked */
790 stat = osErrorResource;
795 /* Return execution status */
798 #endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */
801 Terminate execution of current running thread.
803 __NO_RETURN void osThreadExit (void) {
804 #ifndef USE_FreeRTOS_HEAP_1
811 Terminate execution of a thread.
813 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
814 TaskHandle_t hTask = (TaskHandle_t)thread_id;
816 #ifndef USE_FreeRTOS_HEAP_1
819 if (IRQ_Context() != 0U) {
822 else if (hTask == NULL) {
823 stat = osErrorParameter;
826 tstate = eTaskGetState (hTask);
828 if (tstate != eDeleted) {
832 stat = osErrorResource;
839 /* Return execution status */
844 Get number of active threads.
846 uint32_t osThreadGetCount (void) {
849 if (IRQ_Context() != 0U) {
852 count = uxTaskGetNumberOfTasks();
855 /* Return number of active threads */
859 #if (configUSE_OS2_THREAD_ENUMERATE == 1)
861 Enumerate active threads.
863 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
867 if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
872 /* Allocate memory on heap to temporarily store TaskStatus_t information */
873 count = uxTaskGetNumberOfTasks();
874 task = pvPortMalloc (count * sizeof(TaskStatus_t));
877 /* Retrieve task status information */
878 count = uxTaskGetSystemState (task, count, NULL);
880 /* Copy handles from task status array into provided thread array */
881 for (i = 0U; (i < count) && (i < array_items); i++) {
882 thread_array[i] = (osThreadId_t)task[i].xHandle;
886 (void)xTaskResumeAll();
891 /* Return number of enumerated threads */
894 #endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */
897 /* ==== Thread Flags Functions ==== */
899 #if (configUSE_OS2_THREAD_FLAGS == 1)
901 Set the specified Thread Flags of a thread.
903 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
904 TaskHandle_t hTask = (TaskHandle_t)thread_id;
908 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
909 rflags = (uint32_t)osErrorParameter;
912 rflags = (uint32_t)osError;
914 if (IRQ_Context() != 0U) {
917 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
918 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
920 portYIELD_FROM_ISR (yield);
923 (void)xTaskNotify (hTask, flags, eSetBits);
924 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
927 /* Return flags after setting */
932 Clear the specified Thread Flags of current running thread.
934 uint32_t osThreadFlagsClear (uint32_t flags) {
936 uint32_t rflags, cflags;
938 if (IRQ_Context() != 0U) {
939 rflags = (uint32_t)osErrorISR;
941 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
942 rflags = (uint32_t)osErrorParameter;
945 hTask = xTaskGetCurrentTaskHandle();
947 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
951 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
952 rflags = (uint32_t)osError;
956 rflags = (uint32_t)osError;
960 /* Return flags before clearing */
965 Get the current Thread Flags of current running thread.
967 uint32_t osThreadFlagsGet (void) {
971 if (IRQ_Context() != 0U) {
972 rflags = (uint32_t)osErrorISR;
975 hTask = xTaskGetCurrentTaskHandle();
977 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
978 rflags = (uint32_t)osError;
982 /* Return current flags */
987 Wait for one or more Thread Flags of the current running thread to become signaled.
989 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
990 uint32_t rflags, nval;
992 TickType_t t0, td, tout;
995 if (IRQ_Context() != 0U) {
996 rflags = (uint32_t)osErrorISR;
998 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
999 rflags = (uint32_t)osErrorParameter;
1002 if ((options & osFlagsNoClear) == osFlagsNoClear) {
1011 t0 = xTaskGetTickCount();
1013 rval = xTaskNotifyWait (0, clear, &nval, tout);
1015 if (rval == pdPASS) {
1019 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
1020 if ((flags & rflags) == flags) {
1023 if (timeout == 0U) {
1024 rflags = (uint32_t)osErrorResource;
1030 if ((flags & rflags) != 0) {
1033 if (timeout == 0U) {
1034 rflags = (uint32_t)osErrorResource;
1040 /* Update timeout */
1041 td = xTaskGetTickCount() - t0;
1046 tout = timeout - td;
1051 rflags = (uint32_t)osErrorResource;
1053 rflags = (uint32_t)osErrorTimeout;
1057 while (rval != pdFAIL);
1060 /* Return flags before clearing */
1063 #endif /* (configUSE_OS2_THREAD_FLAGS == 1) */
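/*
  Sketch of the thread flags functions above used to signal a thread from an
  interrupt handler. UART0_IRQHandler, rx_thread and FLAG_RX_DONE are hypothetical.
*/
#if 0
#define FLAG_RX_DONE  0x00000001U

static osThreadId_t rx_thread_id;

void UART0_IRQHandler (void) {
  (void)osThreadFlagsSet (rx_thread_id, FLAG_RX_DONE);   /* ISR path uses xTaskNotifyFromISR */
}

static void rx_thread (void *argument) {
  uint32_t flags;
  (void)argument;
  rx_thread_id = osThreadGetId ();
  for (;;) {
    flags = osThreadFlagsWait (FLAG_RX_DONE, osFlagsWaitAny, osWaitForever);
    if ((flags & 0x80000000U) == 0U) {                    /* not an osFlagsError code */
      /* process received data */
    }
  }
}
#endif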
1066 /* ==== Generic Wait Functions ==== */
1069 Wait for Timeout (Time Delay).
1071 osStatus_t osDelay (uint32_t ticks) {
1074 if (IRQ_Context() != 0U) {
1085 /* Return execution status */
1090 Wait until specified time.
1092 osStatus_t osDelayUntil (uint32_t ticks) {
1093 TickType_t tcnt, delay;
1096 if (IRQ_Context() != 0U) {
1101 tcnt = xTaskGetTickCount();
1103 /* Determine remaining number of ticks to delay */
1104 delay = (TickType_t)ticks - tcnt;
1106 /* Check if target tick has not expired */
1107 if ((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
1108 if (xTaskDelayUntil (&tcnt, delay) == pdFALSE) {
1115 /* No delay or already expired */
1116 stat = osErrorParameter;
1120 /* Return execution status */
1125 /* ==== Timer Management Functions ==== */
1127 #if (configUSE_OS2_TIMER == 1)
1129 static void TimerCallback (TimerHandle_t hTimer) {
1130 TimerCallback_t *callb;
1132 /* Retrieve pointer to callback function and argument */
1133 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1135 /* Remove dynamic allocation flag */
1136 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1138 if (callb != NULL) {
1139 callb->func (callb->arg);
1144 Create and Initialize a timer.
1146 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
1148 TimerHandle_t hTimer;
1149 TimerCallback_t *callb;
1156 if ((IRQ_Context() == 0U) && (func != NULL)) {
1160 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1161 /* Static memory allocation is available: check if memory for control block */
1162 /* is provided and if it also contains space for callback and its argument */
1163 if ((attr != NULL) && (attr->cb_mem != NULL)) {
1164 if (attr->cb_size >= (sizeof(StaticTimer_t) + sizeof(TimerCallback_t))) {
1165 callb = (TimerCallback_t *)((uint32_t)attr->cb_mem + sizeof(StaticTimer_t));
1170 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1171 /* Dynamic memory allocation is available: if memory for callback and */
1172 /* its argument is not provided, allocate it from dynamic memory pool */
1173 if (callb == NULL) {
1174 callb = (TimerCallback_t *)pvPortMalloc (sizeof(TimerCallback_t));
1176 if (callb != NULL) {
1177 /* Callback memory was allocated from dynamic pool, set flag */
1183 if (callb != NULL) {
1185 callb->arg = argument;
1187 if (type == osTimerOnce) {
1197 if (attr->name != NULL) {
1201 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
1202 /* The memory for control block is provided, use static object */
1206 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1207 /* Control block will be allocated from the dynamic pool */
1215 /* Store callback memory dynamic allocation flag */
1216 callb = (TimerCallback_t *)((uint32_t)callb | callb_dyn);
1218 TimerCallback is always installed as the FreeRTOS timer callback; it calls the application-
1219 specified function with its argument, both of which are stored in the callb structure.
1222 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1223 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
1228 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1229 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
1234 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1235 if ((hTimer == NULL) && (callb != NULL) && (callb_dyn == 1U)) {
1236 /* Failed to create a timer, release allocated resources */
1237 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1245 /* Return timer ID */
1246 return ((osTimerId_t)hTimer);
1250 Get name of a timer.
1252 const char *osTimerGetName (osTimerId_t timer_id) {
1253 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1256 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1259 p = pcTimerGetName (hTimer);
1262 /* Return name as null-terminated string */
1267 Start or restart a timer.
1269 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
1270 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1273 if (IRQ_Context() != 0U) {
1276 else if ((hTimer == NULL) || (ticks == 0U)) {
1277 stat = osErrorParameter;
1280 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
1283 stat = osErrorResource;
1287 /* Return execution status */
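/*
  Sketch of creating and starting a periodic software timer through osTimerNew and
  osTimerStart above. The callback blink_cb and the 100-tick period are illustrative.
*/
#if 0
static void blink_cb (void *argument) {
  (void)argument;
  /* e.g. toggle an LED */
}

static void start_blink_timer (void) {
  osTimerId_t tid = osTimerNew (blink_cb, osTimerPeriodic, NULL, NULL);
  if (tid != NULL) {
    (void)osTimerStart (tid, 100U);       /* period of 100 kernel ticks */
  }
}
#endif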
1294 osStatus_t osTimerStop (osTimerId_t timer_id) {
1295 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1298 if (IRQ_Context() != 0U) {
1301 else if (hTimer == NULL) {
1302 stat = osErrorParameter;
1305 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1306 stat = osErrorResource;
1309 if (xTimerStop (hTimer, 0) == pdPASS) {
1317 /* Return execution status */
1322 Check if a timer is running.
1324 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1325 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1328 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1331 running = (uint32_t)xTimerIsTimerActive (hTimer);
1334 /* Return 0: not running, 1: running */
1341 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1342 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1344 #ifndef USE_FreeRTOS_HEAP_1
1345 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1346 TimerCallback_t *callb;
1349 if (IRQ_Context() != 0U) {
1352 else if (hTimer == NULL) {
1353 stat = osErrorParameter;
1356 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1357 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1360 if (xTimerDelete (hTimer, 0) == pdPASS) {
1361 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1362 if ((uint32_t)callb & 1U) {
1363 /* Callback memory was allocated from dynamic pool, clear flag */
1364 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1366 /* Return allocated memory to dynamic pool */
1372 stat = osErrorResource;
1379 /* Return execution status */
1382 #endif /* (configUSE_OS2_TIMER == 1) */
1385 /* ==== Event Flags Management Functions ==== */
1388 Create and Initialize an Event Flags object.
1391 - Event flags are limited to 24 bits.
1393 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1394 EventGroupHandle_t hEventGroup;
1399 if (IRQ_Context() == 0U) {
1403 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1404 /* The memory for control block is provided, use static object */
1408 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1409 /* Control block will be allocated from the dynamic pool */
1419 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1420 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1425 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1426 hEventGroup = xEventGroupCreate();
1432 /* Return event flags ID */
1433 return ((osEventFlagsId_t)hEventGroup);
1437 Set the specified Event Flags.
1440 - Event flags are limited to 24 bits.
1442 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1443 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1447 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1448 rflags = (uint32_t)osErrorParameter;
1450 else if (IRQ_Context() != 0U) {
1451 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1453 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1454 rflags = (uint32_t)osErrorResource;
1458 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1459 rflags = (uint32_t)osErrorResource;
1461 /* Retrieve bits that are already set and add flags to be set in current call */
1462 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1464 portYIELD_FROM_ISR (yield);
1469 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1472 /* Return event flags after setting */
1477 Clear the specified Event Flags.
1480 - Event flags are limited to 24 bits.
1482 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1483 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1486 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1487 rflags = (uint32_t)osErrorParameter;
1489 else if (IRQ_Context() != 0U) {
1490 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1491 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsClear from ISR */
1492 rflags = (uint32_t)osErrorResource;
1494 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1496 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1497 rflags = (uint32_t)osErrorResource;
1500 /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
1501 /* Yield is required here otherwise clear operation might not execute in the right order. */
1502 /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
1503 portYIELD_FROM_ISR (pdTRUE);
1508 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1511 /* Return event flags before clearing */
1516 Get the current Event Flags.
1519 - Event flags are limited to 24 bits.
1521 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1522 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1525 if (ef_id == NULL) {
1528 else if (IRQ_Context() != 0U) {
1529 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1532 rflags = xEventGroupGetBits (hEventGroup);
1535 /* Return current event flags */
1540 Wait for one or more Event Flags to become signaled.
1543 - Event flags are limited to 24 bits.
1544 - osEventFlagsWait cannot be called from an ISR.
1546 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1547 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1548 BaseType_t wait_all;
1549 BaseType_t exit_clr;
1552 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1553 rflags = (uint32_t)osErrorParameter;
1555 else if (IRQ_Context() != 0U) {
1556 if (timeout == 0U) {
1557 /* Try semantic is not supported */
1558 rflags = (uint32_t)osErrorISR;
1560 /* Calling osEventFlagsWait from ISR with non-zero timeout is invalid */
1561 rflags = (uint32_t)osFlagsErrorParameter;
1565 if (options & osFlagsWaitAll) {
1571 if (options & osFlagsNoClear) {
1577 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1579 if (options & osFlagsWaitAll) {
1580 if ((flags & rflags) != flags) {
1582 rflags = (uint32_t)osErrorTimeout;
1584 rflags = (uint32_t)osErrorResource;
1589 if ((flags & rflags) == 0U) {
1591 rflags = (uint32_t)osErrorTimeout;
1593 rflags = (uint32_t)osErrorResource;
1599 /* Return event flags before clearing */
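/*
  Sketch of pairing osEventFlagsSet and osEventFlagsWait as implemented above.
  EVT_START, EVT_STOP and the function names are hypothetical.
*/
#if 0
#define EVT_START  0x00000001U
#define EVT_STOP   0x00000002U

static osEventFlagsId_t evt_id;

static void consumer (void *argument) {
  uint32_t flags;
  (void)argument;
  for (;;) {
    flags = osEventFlagsWait (evt_id, EVT_START | EVT_STOP, osFlagsWaitAny, osWaitForever);
    if ((flags & 0x80000000U) == 0U) {
      /* react to EVT_START and/or EVT_STOP */
    }
  }
}

static void controller (void) {
  evt_id = osEventFlagsNew (NULL);
  (void)osEventFlagsSet (evt_id, EVT_START);
}
#endif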
1604 Delete an Event Flags object.
1606 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1607 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1610 #ifndef USE_FreeRTOS_HEAP_1
1611 if (IRQ_Context() != 0U) {
1614 else if (hEventGroup == NULL) {
1615 stat = osErrorParameter;
1619 vEventGroupDelete (hEventGroup);
1625 /* Return execution status */
1630 /* ==== Mutex Management Functions ==== */
1632 #if (configUSE_OS2_MUTEX == 1)
1634 Create and Initialize a Mutex object.
1637 - Priority inherit protocol is used by default, osMutexPrioInherit attribute is ignored.
1638 - Robust mutex is not supported, NULL is returned if used.
1640 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1641 SemaphoreHandle_t hMutex;
1648 if (IRQ_Context() == 0U) {
1650 type = attr->attr_bits;
1655 if ((type & osMutexRecursive) == osMutexRecursive) {
1661 if ((type & osMutexRobust) != osMutexRobust) {
1665 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1666 /* The memory for control block is provided, use static object */
1670 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1671 /* Control block will be allocated from the dynamic pool */
1681 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1683 #if (configUSE_RECURSIVE_MUTEXES == 1)
1684 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1688 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1694 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1696 #if (configUSE_RECURSIVE_MUTEXES == 1)
1697 hMutex = xSemaphoreCreateRecursiveMutex ();
1700 hMutex = xSemaphoreCreateMutex ();
1706 #if (configQUEUE_REGISTRY_SIZE > 0)
1707 if (hMutex != NULL) {
1708 if ((attr != NULL) && (attr->name != NULL)) {
1709 /* Only non-NULL name objects are added to the Queue Registry */
1710 vQueueAddToRegistry (hMutex, attr->name);
1715 if ((hMutex != NULL) && (rmtx != 0U)) {
1716 /* Set LSB as 'recursive mutex flag' */
1717 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1722 /* Return mutex ID */
1723 return ((osMutexId_t)hMutex);
1727 Acquire a Mutex or timeout if it is locked.
1729 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1730 SemaphoreHandle_t hMutex;
1734 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1736 /* Extract recursive mutex flag */
1737 rmtx = (uint32_t)mutex_id & 1U;
1741 if (IRQ_Context() != 0U) {
1744 else if (hMutex == NULL) {
1745 stat = osErrorParameter;
1749 #if (configUSE_RECURSIVE_MUTEXES == 1)
1750 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1751 if (timeout != 0U) {
1752 stat = osErrorTimeout;
1754 stat = osErrorResource;
1760 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1761 if (timeout != 0U) {
1762 stat = osErrorTimeout;
1764 stat = osErrorResource;
1770 /* Return execution status */
1775 Release a Mutex that was acquired by osMutexAcquire.
1777 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1778 SemaphoreHandle_t hMutex;
1782 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1784 /* Extract recursive mutex flag */
1785 rmtx = (uint32_t)mutex_id & 1U;
1789 if (IRQ_Context() != 0U) {
1792 else if (hMutex == NULL) {
1793 stat = osErrorParameter;
1797 #if (configUSE_RECURSIVE_MUTEXES == 1)
1798 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1799 stat = osErrorResource;
1804 if (xSemaphoreGive (hMutex) != pdPASS) {
1805 stat = osErrorResource;
1810 /* Return execution status */
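/*
  Sketch of requesting a recursive mutex, which osMutexNew above encodes by setting
  the handle's least significant bit. The attribute object and function names are
  hypothetical; priority inheritance is used by default regardless of osMutexPrioInherit.
*/
#if 0
static const osMutexAttr_t app_mutex_attr = {
  .name      = "app_mutex",
  .attr_bits = osMutexRecursive | osMutexPrioInherit,
  .cb_mem    = NULL,
  .cb_size   = 0U
};

static void mutex_demo (void) {
  osMutexId_t m = osMutexNew (&app_mutex_attr);

  if (osMutexAcquire (m, osWaitForever) == osOK) {
    (void)osMutexAcquire (m, 0U);         /* nested acquire is allowed for a recursive mutex */
    (void)osMutexRelease (m);
    (void)osMutexRelease (m);
  }
}
#endif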
1815 Get Thread which owns a Mutex object.
1817 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1818 SemaphoreHandle_t hMutex;
1821 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1823 if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
1826 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1829 /* Return owner thread ID */
1834 Delete a Mutex object.
1836 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1838 #ifndef USE_FreeRTOS_HEAP_1
1839 SemaphoreHandle_t hMutex;
1841 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1843 if (IRQ_Context() != 0U) {
1846 else if (hMutex == NULL) {
1847 stat = osErrorParameter;
1850 #if (configQUEUE_REGISTRY_SIZE > 0)
1851 vQueueUnregisterQueue (hMutex);
1854 vSemaphoreDelete (hMutex);
1860 /* Return execution status */
1863 #endif /* (configUSE_OS2_MUTEX == 1) */
1866 /* ==== Semaphore Management Functions ==== */
1869 Create and Initialize a Semaphore object.
1871 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1872 SemaphoreHandle_t hSemaphore;
1877 if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
1881 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1882 /* The memory for control block is provided, use static object */
1886 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1887 /* Control block will be allocated from the dynamic pool */
1897 if (max_count == 1U) {
1899 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1900 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1904 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1905 hSemaphore = xSemaphoreCreateBinary();
1909 if ((hSemaphore != NULL) && (initial_count != 0U)) {
1910 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1911 vSemaphoreDelete (hSemaphore);
1918 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1919 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
1923 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1924 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
1929 #if (configQUEUE_REGISTRY_SIZE > 0)
1930 if (hSemaphore != NULL) {
1931 if ((attr != NULL) && (attr->name != NULL)) {
1932 /* Only non-NULL name objects are added to the Queue Registry */
1933 vQueueAddToRegistry (hSemaphore, attr->name);
1940 /* Return semaphore ID */
1941 return ((osSemaphoreId_t)hSemaphore);
1945 Acquire a Semaphore token or timeout if no tokens are available.
1947 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
1948 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1954 if (hSemaphore == NULL) {
1955 stat = osErrorParameter;
1957 else if (IRQ_Context() != 0U) {
1958 if (timeout != 0U) {
1959 stat = osErrorParameter;
1964 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
1965 stat = osErrorResource;
1967 portYIELD_FROM_ISR (yield);
1972 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
1973 if (timeout != 0U) {
1974 stat = osErrorTimeout;
1976 stat = osErrorResource;
1981 /* Return execution status */
1986 Release a Semaphore token up to the initial maximum count.
1988 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
1989 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1995 if (hSemaphore == NULL) {
1996 stat = osErrorParameter;
1998 else if (IRQ_Context() != 0U) {
2001 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
2002 stat = osErrorResource;
2004 portYIELD_FROM_ISR (yield);
2008 if (xSemaphoreGive (hSemaphore) != pdPASS) {
2009 stat = osErrorResource;
2013 /* Return execution status */
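/*
  Sketch of the binary/counting distinction handled in osSemaphoreNew above:
  a max_count of 1 creates a binary semaphore, larger values a counting one.
  The object names are hypothetical.
*/
#if 0
static osSemaphoreId_t sig;               /* binary semaphore, initially empty  */
static osSemaphoreId_t slots;             /* counting semaphore, 4 tokens free  */

static void semaphore_demo (void) {
  sig   = osSemaphoreNew (1U, 0U, NULL);
  slots = osSemaphoreNew (4U, 4U, NULL);

  if (osSemaphoreAcquire (slots, osWaitForever) == osOK) {
    /* use one of the four resources */
    (void)osSemaphoreRelease (slots);
  }
}
#endif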
2018 Get current Semaphore token count.
2020 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
2021 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2024 if (hSemaphore == NULL) {
2027 else if (IRQ_Context() != 0U) {
2028 count = (uint32_t)uxSemaphoreGetCountFromISR (hSemaphore);
2030 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
2033 /* Return number of tokens */
2038 Delete a Semaphore object.
2040 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
2041 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2044 #ifndef USE_FreeRTOS_HEAP_1
2045 if (IRQ_Context() != 0U) {
2048 else if (hSemaphore == NULL) {
2049 stat = osErrorParameter;
2052 #if (configQUEUE_REGISTRY_SIZE > 0)
2053 vQueueUnregisterQueue (hSemaphore);
2057 vSemaphoreDelete (hSemaphore);
2063 /* Return execution status */
2068 /* ==== Message Queue Management Functions ==== */
2071 Create and Initialize a Message Queue object.
2074 - The memory for control block and message data must be provided in the
2075 osMessageQueueAttr_t structure in order to allocate the object statically.
2077 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
2078 QueueHandle_t hQueue;
2083 if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
2087 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
2088 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
2089 /* The memory for control block and message data is provided, use static object */
2093 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
2094 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
2095 /* Control block will be allocated from the dynamic pool */
2105 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2106 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
2111 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2112 hQueue = xQueueCreate (msg_count, msg_size);
2117 #if (configQUEUE_REGISTRY_SIZE > 0)
2118 if (hQueue != NULL) {
2119 if ((attr != NULL) && (attr->name != NULL)) {
2120 /* Only non-NULL name objects are added to the Queue Registry */
2121 vQueueAddToRegistry (hQueue, attr->name);
2128 /* Return message queue ID */
2129 return ((osMessageQueueId_t)hQueue);
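/*
  Sketch of a statically allocated message queue that satisfies the size checks
  above (cb_size >= sizeof(StaticQueue_t), mq_size >= msg_count * msg_size).
  The message type msg_t and the object names are hypothetical.
*/
#if 0
typedef struct {
  uint32_t id;
  uint32_t value;
} msg_t;

#define MSG_COUNT   8U

static StaticQueue_t mq_cb;
static uint8_t       mq_storage[MSG_COUNT * sizeof(msg_t)];

static const osMessageQueueAttr_t mq_attr = {
  .name    = "app_mq",
  .cb_mem  = &mq_cb,
  .cb_size = sizeof(mq_cb),
  .mq_mem  = mq_storage,
  .mq_size = sizeof(mq_storage)
};

static osMessageQueueId_t mq_id;

static void create_queue (void) {
  mq_id = osMessageQueueNew (MSG_COUNT, sizeof(msg_t), &mq_attr);
}
#endif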
2133 Put a Message into a Queue or timeout if Queue is full.
2136 - Message priority is ignored
2138 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
2139 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2143 (void)msg_prio; /* Message priority is ignored */
2147 if (IRQ_Context() != 0U) {
2148 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2149 stat = osErrorParameter;
2154 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
2155 stat = osErrorResource;
2157 portYIELD_FROM_ISR (yield);
2162 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2163 stat = osErrorParameter;
2166 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2167 if (timeout != 0U) {
2168 stat = osErrorTimeout;
2170 stat = osErrorResource;
2176 /* Return execution status */
2181 Get a Message from a Queue or timeout if Queue is empty.
2184 - Message priority is ignored
2186 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
2187 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2191 (void)msg_prio; /* Message priority is ignored */
2195 if (IRQ_Context() != 0U) {
2196 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2197 stat = osErrorParameter;
2202 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
2203 stat = osErrorResource;
2205 portYIELD_FROM_ISR (yield);
2210 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2211 stat = osErrorParameter;
2214 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2215 if (timeout != 0U) {
2216 stat = osErrorTimeout;
2218 stat = osErrorResource;
2224 /* Return execution status */
2229 Get maximum number of messages in a Message Queue.
2231 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
2232 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2238 /* capacity = pxQueue->uxLength */
2239 capacity = mq->uxDummy4[1];
2242 /* Return maximum number of messages */
2247 Get maximum message size in a Message Queue.
2249 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
2250 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2256 /* size = pxQueue->uxItemSize */
2257 size = mq->uxDummy4[2];
2260 /* Return maximum message size */
2265 Get number of queued messages in a Message Queue.
2267 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
2268 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2271 if (hQueue == NULL) {
2274 else if (IRQ_Context() != 0U) {
2275 count = uxQueueMessagesWaitingFromISR (hQueue);
2278 count = uxQueueMessagesWaiting (hQueue);
2281 /* Return number of queued messages */
2282 return ((uint32_t)count);
2286 Get number of available slots for messages in a Message Queue.
2288 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
2289 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2296 else if (IRQ_Context() != 0U) {
2297 isrm = taskENTER_CRITICAL_FROM_ISR();
2299 /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
2300 space = mq->uxDummy4[1] - mq->uxDummy4[0];
2302 taskEXIT_CRITICAL_FROM_ISR(isrm);
2305 space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
2308 /* Return number of available slots */
2313 Reset a Message Queue to initial empty state.
2315 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
2316 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2319 if (IRQ_Context() != 0U) {
2322 else if (hQueue == NULL) {
2323 stat = osErrorParameter;
2327 (void)xQueueReset (hQueue);
2330 /* Return execution status */
2335 Delete a Message Queue object.
2337 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
2338 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2341 #ifndef USE_FreeRTOS_HEAP_1
2342 if (IRQ_Context() != 0U) {
2345 else if (hQueue == NULL) {
2346 stat = osErrorParameter;
2349 #if (configQUEUE_REGISTRY_SIZE > 0)
2350 vQueueUnregisterQueue (hQueue);
2354 vQueueDelete (hQueue);
2360 /* Return execution status */
2365 /* ==== Memory Pool Management Functions ==== */
2367 #ifdef FREERTOS_MPOOL_H_
2368 /* Static memory pool functions */
2369 static void FreeBlock (MemPool_t *mp, void *block);
2370 static void *AllocBlock (MemPool_t *mp);
2371 static void *CreateBlock (MemPool_t *mp);
2374 Create and Initialize a Memory Pool object.
2376 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
2379 int32_t mem_cb, mem_mp;
2382 if (IRQ_Context() != 0U) {
2385 else if ((block_count == 0U) || (block_size == 0U)) {
2390 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
2397 if (attr->name != NULL) {
2401 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
2402 /* Static control block is provided */
2405 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
2406 /* Allocate control block memory on heap */
2410 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
2411 /* Allocate memory array on heap */
2415 if (attr->mp_mem != NULL) {
2416 /* Check if array is 4-byte aligned */
2417 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
2418 /* Check if array big enough */
2419 if (attr->mp_size >= sz) {
2420 /* Static memory pool array is provided */
2428 /* Attributes not provided, allocate memory on heap */
2434 mp = pvPortMalloc (sizeof(MemPool_t));
2440 /* Create a semaphore (max count == initial count == block_count) */
2441 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2442 mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
2443 #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2444 mp->sem = xSemaphoreCreateCounting (block_count, block_count);
2449 if (mp->sem != NULL) {
2450 /* Setup memory array */
2452 mp->mem_arr = pvPortMalloc (sz);
2454 mp->mem_arr = attr->mp_mem;
2459 if ((mp != NULL) && (mp->mem_arr != NULL)) {
2460 /* Memory pool can be created */
2464 mp->bl_sz = block_size;
2465 mp->bl_cnt = block_count;
2468 /* Set heap allocated memory flags */
2469 mp->status = MPOOL_STATUS;
2472 /* Control block on heap */
2476 /* Memory array on heap */
2481 /* Memory pool cannot be created, release allocated resources */
2482 if ((mem_cb == 0) && (mp != NULL)) {
2483 /* Free control block memory */
2490 /* Return memory pool ID */
2495 Get name of a Memory Pool object.
2497 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
2498 MemPool_t *mp = (osMemoryPoolId_t)mp_id;
2501 if (IRQ_Context() != 0U) {
2504 else if (mp_id == NULL) {
2511 /* Return name as null-terminated string */
2516 Allocate a memory block from a Memory Pool.
2518 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2523 if (mp_id == NULL) {
2524 /* Invalid input parameters */
2530 mp = (MemPool_t *)mp_id;
2532 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2533 if (IRQ_Context() != 0U) {
2534 if (timeout == 0U) {
2535 if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
2536 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2537 isrm = taskENTER_CRITICAL_FROM_ISR();
2539 /* Get a block from the free-list */
2540 block = AllocBlock(mp);
2542 if (block == NULL) {
2543 /* List of free blocks is empty, 'create' new block */
2544 block = CreateBlock(mp);
2547 taskEXIT_CRITICAL_FROM_ISR(isrm);
2553 if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
2554 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2555 taskENTER_CRITICAL();
2557 /* Get a block from the free-list */
2558 block = AllocBlock(mp);
2560 if (block == NULL) {
2561 /* List of free blocks is empty, 'create' new block */
2562 block = CreateBlock(mp);
2565 taskEXIT_CRITICAL();
2572 /* Return memory block address */
2577 Return an allocated memory block back to a Memory Pool.
2579 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2585 if ((mp_id == NULL) || (block == NULL)) {
2586 /* Invalid input parameters */
2587 stat = osErrorParameter;
2590 mp = (MemPool_t *)mp_id;
2592 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2593 /* Invalid object status */
2594 stat = osErrorResource;
2596 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2597 /* Block pointer outside of memory array area */
2598 stat = osErrorParameter;
2603 if (IRQ_Context() != 0U) {
2604 if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
2605 stat = osErrorResource;
2608 isrm = taskENTER_CRITICAL_FROM_ISR();
2610 /* Add block to the list of free blocks */
2611 FreeBlock(mp, block);
2613 taskEXIT_CRITICAL_FROM_ISR(isrm);
2616 xSemaphoreGiveFromISR (mp->sem, &yield);
2617 portYIELD_FROM_ISR (yield);
2621 if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
2622 stat = osErrorResource;
2625 taskENTER_CRITICAL();
2627 /* Add block to the list of free blocks */
2628 FreeBlock(mp, block);
2630 taskEXIT_CRITICAL();
2632 xSemaphoreGive (mp->sem);
2638 /* Return execution status */
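/*
  Sketch of using the memory pool implementation above. The block type and names
  are hypothetical; a timeout of 0 makes osMemoryPoolAlloc non-blocking.
*/
#if 0
typedef struct {
  uint32_t buf[16];
} pool_block_t;

static osMemoryPoolId_t pool_id;

static void pool_demo (void) {
  pool_block_t *blk;

  pool_id = osMemoryPoolNew (10U, sizeof(pool_block_t), NULL);

  blk = (pool_block_t *)osMemoryPoolAlloc (pool_id, 0U);
  if (blk != NULL) {
    /* use the block */
    (void)osMemoryPoolFree (pool_id, blk);
  }
}
#endif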
2643 Get maximum number of memory blocks in a Memory Pool.
2645 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2649 if (mp_id == NULL) {
2650 /* Invalid input parameters */
2654 mp = (MemPool_t *)mp_id;
2656 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2657 /* Invalid object status */
2665 /* Return maximum number of memory blocks */
2670 Get memory block size in a Memory Pool.
2672 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2676 if (mp_id == NULL) {
2677 /* Invalid input parameters */
2681 mp = (MemPool_t *)mp_id;
2683 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2684 /* Invalid object status */
2692 /* Return memory block size in bytes */
2697 Get number of memory blocks used in a Memory Pool.
2699 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2703 if (mp_id == NULL) {
2704 /* Invalid input parameters */
2708 mp = (MemPool_t *)mp_id;
2710 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2711 /* Invalid object status */
2715 if (IRQ_Context() != 0U) {
2716 n = uxSemaphoreGetCountFromISR (mp->sem);
2718 n = uxSemaphoreGetCount (mp->sem);
2725 /* Return number of memory blocks used */
2730 Get number of memory blocks available in a Memory Pool.
2732 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2736 if (mp_id == NULL) {
2737 /* Invalid input parameters */
2741 mp = (MemPool_t *)mp_id;
2743 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2744 /* Invalid object status */
2748 if (IRQ_Context() != 0U) {
2749 n = uxSemaphoreGetCountFromISR (mp->sem);
2751 n = uxSemaphoreGetCount (mp->sem);
2756 /* Return number of memory blocks available */
2761 Delete a Memory Pool object.
2763 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2767 if (mp_id == NULL) {
2768 /* Invalid input parameters */
2769 stat = osErrorParameter;
2771 else if (IRQ_Context() != 0U) {
2775 mp = (MemPool_t *)mp_id;
2777 taskENTER_CRITICAL();
2779 /* Invalidate control block status */
2780 mp->status = mp->status & 3U;
2782 /* Wake-up tasks waiting for pool semaphore */
2783 while (xSemaphoreGive (mp->sem) == pdTRUE);
2789 if ((mp->status & 2U) != 0U) {
2790 /* Memory pool array allocated on heap */
2791 vPortFree (mp->mem_arr);
2793 if ((mp->status & 1U) != 0U) {
2794 /* Memory pool control block allocated on heap */
2798 taskEXIT_CRITICAL();
2803 /* Return execution status */
2808 Create a new block according to the current block index.
2810 static void *CreateBlock (MemPool_t *mp) {
2811 MemPoolBlock_t *p = NULL;
2813 if (mp->n < mp->bl_cnt) {
2814 /* Unallocated blocks exist, set pointer to new block */
2815 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2817 /* Increment block index */
2825 Allocate a block from the list of free blocks.
2827 static void *AllocBlock (MemPool_t *mp) {
2828 MemPoolBlock_t *p = NULL;
2830 if (mp->head != NULL) {
2831 /* List of free blocks exists, get the head block */
2834 /* Head block is now next on the list */
2842 Free a block by returning it to the list of free blocks.
2844 static void FreeBlock (MemPool_t *mp, void *block) {
2845 MemPoolBlock_t *p = block;
2847 /* Store current head into block memory space */
2850 /* Store current block as new head */
2853 #endif /* FREERTOS_MPOOL_H_ */
2854 /*---------------------------------------------------------------------------*/
2856 /* Callback function prototypes */
2857 extern void vApplicationIdleHook (void);
2858 extern void vApplicationMallocFailedHook (void);
2859 extern void vApplicationDaemonTaskStartupHook (void);
2862 Dummy implementation of the callback function vApplicationIdleHook().
2864 #if (configUSE_IDLE_HOOK == 1)
2865 __WEAK void vApplicationIdleHook (void){}
2869 Dummy implementation of the callback function vApplicationTickHook().
2871 #if (configUSE_TICK_HOOK == 1)
2872 __WEAK void vApplicationTickHook (void){}
2876 Dummy implementation of the callback function vApplicationMallocFailedHook().
2878 #if (configUSE_MALLOC_FAILED_HOOK == 1)
2879 __WEAK void vApplicationMallocFailedHook (void) {
2880 /* Assert when malloc failed hook is enabled but no application defined function exists */
2886 Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
2888 #if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
2889 __WEAK void vApplicationDaemonTaskStartupHook (void){}
2893 Dummy implementation of the callback function vApplicationStackOverflowHook().
2895 #if (configCHECK_FOR_STACK_OVERFLOW > 0)
2896 __WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, char *pcTaskName) {
2900 /* Assert when stack overflow is enabled but no application defined function exists */
2905 /*---------------------------------------------------------------------------*/
2906 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2908 vApplicationGetIdleTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
2909 equals 1 and is required for static memory allocation support.
2911 __WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
2912 /* Idle task control block and stack */
2913 static StaticTask_t Idle_TCB;
2914 static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
2916 *ppxIdleTaskTCBBuffer = &Idle_TCB;
2917 *ppxIdleTaskStackBuffer = &Idle_Stack[0];
2918 *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
2922 vApplicationGetTimerTaskMemory is called when configSUPPORT_STATIC_ALLOCATION
2923 equals 1 and is required for static memory allocation support.
2925 __WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
2926 /* Timer task control block and stack */
2927 static StaticTask_t Timer_TCB;
2928 static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
2930 *ppxTimerTaskTCBBuffer = &Timer_TCB;
2931 *ppxTimerTaskStackBuffer = &Timer_Stack[0];
2932 *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;