1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2024 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
33 #include "timers.h" // ARM.FreeRTOS::RTOS:Timers
35 #include "freertos_mpool.h" // osMemoryPool definitions
36 #include "freertos_os2.h" // Configuration check and setup
38 /*---------------------------------------------------------------------------*/
39 #ifndef __ARM_ARCH_6M__
40 #define __ARM_ARCH_6M__ 0
42 #ifndef __ARM_ARCH_7M__
43 #define __ARM_ARCH_7M__ 0
45 #ifndef __ARM_ARCH_7EM__
46 #define __ARM_ARCH_7EM__ 0
48 #ifndef __ARM_ARCH_8M_MAIN__
49 #define __ARM_ARCH_8M_MAIN__ 0
51 #ifndef __ARM_ARCH_7A__
52 #define __ARM_ARCH_7A__ 0
55 #if ((__ARM_ARCH_7M__ == 1U) || \
56 (__ARM_ARCH_7EM__ == 1U) || \
57 (__ARM_ARCH_8M_MAIN__ == 1U))
58 #define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
59 #elif (__ARM_ARCH_6M__ == 1U)
60 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
61 #elif (__ARM_ARCH_7A__ == 1U)
63 #define CPSR_MASKBIT_I 0x80U
65 #define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
67 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
70 #if (__ARM_ARCH_7A__ == 1U)
71 /* CPSR mode bitmasks */
72 #define CPSR_MODE_USER 0x10U
73 #define CPSR_MODE_SYSTEM 0x1FU
75 #define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
77 #define IS_IRQ_MODE() (__get_IPSR() != 0U)
81 #define MAX_BITS_TASK_NOTIFY 31U
82 #define MAX_BITS_EVENT_GROUPS 24U
84 #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
85 #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
87 /* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
88 #define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
89 ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
90 ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
92 #define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
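/* Example of the encoding above: FreeRTOS kernel V10.4.6 is reported as */
/* 10*10000000 + 4*10000 + 6 = 100040006 in osVersion_t.                 */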
94 /* Timer callback information structure definition */
100 /* Kernel initialization state */
101 static osKernelState_t KernelState = osKernelInactive;
Heap region definition used by the heap_5 variant.

Define configAPPLICATION_ALLOCATED_HEAP as a nonzero value in FreeRTOSConfig.h if
the heap regions are already defined and vPortDefineHeapRegions is called by the application.

Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using the
definition configHEAP_5_REGIONS as its parameter. configHEAP_5_REGIONS can be
overridden by defining it globally or in FreeRTOSConfig.h.
113 #if defined(USE_FreeRTOS_HEAP_5)
114 #if (configAPPLICATION_ALLOCATED_HEAP == 0)
The FreeRTOS heap is not defined by the application.
A single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
is provided by default. Define configHEAP_5_REGIONS to provide a custom
HeapRegion_t array.
121 #define HEAP_5_REGION_SETUP 1
123 #ifndef configHEAP_5_REGIONS
124 #define configHEAP_5_REGIONS xHeapRegions
126 static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
128 static HeapRegion_t xHeapRegions[] = {
129 { ucHeap, configTOTAL_HEAP_SIZE },
133 /* Global definition is provided to override default heap array */
134 extern HeapRegion_t configHEAP_5_REGIONS[];
The application has already defined the array used for the FreeRTOS heap and
calls vPortDefineHeapRegions to initialize it.
141 #define HEAP_5_REGION_SETUP 0
142 #endif /* configAPPLICATION_ALLOCATED_HEAP */
143 #endif /* USE_FreeRTOS_HEAP_5 */
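/*
  Usage sketch (illustrative only, not part of this implementation): an application-provided
  heap_5 setup with configAPPLICATION_ALLOCATED_HEAP defined as a nonzero value. The two RAM
  banks and the function name AppHeapInit are hypothetical placeholders.

    static uint8_t ucHeapBank0[16 * 1024];
    static uint8_t ucHeapBank1[32 * 1024];

    // Regions must be ordered by start address and terminated with a NULL entry.
    static const HeapRegion_t xAppHeapRegions[] = {
      { ucHeapBank0, sizeof(ucHeapBank0) },
      { ucHeapBank1, sizeof(ucHeapBank1) },
      { NULL,        0                   }
    };

    static void AppHeapInit (void) {
      // Must run before the first allocation when the heap_5 variant is used.
      vPortDefineHeapRegions(xAppHeapRegions);
    }
*/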
146 #undef SysTick_Handler
148 /* CMSIS SysTick interrupt handler prototype */
149 extern void SysTick_Handler (void);
150 /* FreeRTOS tick timer interrupt handler prototype */
151 extern void xPortSysTickHandler (void);
SysTick handler implementation that also clears the overflow flag.
156 void SysTick_Handler (void) {
157 #if (configUSE_TICKLESS_IDLE == 0)
158 /* Clear overflow flag */
162 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
163 /* Call tick handler */
164 xPortSysTickHandler();
Determine whether the CPU is executing in interrupt context or interrupts are masked.
172 __STATIC_INLINE uint32_t IRQ_Context (void) {
179 /* Called from interrupt context */
183 /* Get FreeRTOS scheduler state */
184 state = xTaskGetSchedulerState();
186 if (state != taskSCHEDULER_NOT_STARTED) {
187 /* Scheduler was started */
188 if (IS_IRQ_MASKED()) {
189 /* Interrupts are masked */
195 /* Return context, 0: thread context, 1: IRQ context */
200 /* ==== Kernel Management Functions ==== */
203 Initialize the RTOS Kernel.
205 osStatus_t osKernelInitialize (void) {
209 if (IRQ_Context() != 0U) {
213 state = xTaskGetSchedulerState();
215 /* Initialize if scheduler not started and not initialized before */
216 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelInactive)) {
217 #if defined(USE_TRACE_EVENT_RECORDER)
218 /* Initialize the trace macro debugging output channel */
219 EvrFreeRTOSSetup(0U);
221 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
222 /* Initialize the memory regions when using heap_5 variant */
223 vPortDefineHeapRegions (configHEAP_5_REGIONS);
225 KernelState = osKernelReady;
232 /* Return execution status */
237 Get RTOS Kernel Information.
239 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
241 if (version != NULL) {
242 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
243 version->api = KERNEL_VERSION;
244 version->kernel = KERNEL_VERSION;
247 if ((id_buf != NULL) && (id_size != 0U)) {
248 /* Buffer for retrieving identification string is provided */
249 if (id_size > sizeof(KERNEL_ID)) {
250 id_size = sizeof(KERNEL_ID);
252 /* Copy kernel identification string into provided buffer */
253 memcpy(id_buf, KERNEL_ID, id_size);
256 /* Return execution status */
261 Get the current RTOS Kernel state.
263 osKernelState_t osKernelGetState (void) {
264 osKernelState_t state;
266 switch (xTaskGetSchedulerState()) {
267 case taskSCHEDULER_RUNNING:
268 state = osKernelRunning;
271 case taskSCHEDULER_SUSPENDED:
272 state = osKernelLocked;
275 case taskSCHEDULER_NOT_STARTED:
277 if (KernelState == osKernelReady) {
278 /* Ready, osKernelInitialize was already called */
279 state = osKernelReady;
281 /* Not initialized */
282 state = osKernelInactive;
287 /* Return current state */
292 Start the RTOS Kernel scheduler.
294 osStatus_t osKernelStart (void) {
298 if (IRQ_Context() != 0U) {
302 state = xTaskGetSchedulerState();
304 /* Start scheduler if initialized and not started before */
305 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelReady)) {
306 /* Change state to ensure correct API flow */
307 KernelState = osKernelRunning;
308 /* Start the kernel scheduler */
309 vTaskStartScheduler();
316 /* Return execution status */
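/*
  Usage sketch (illustrative only): the typical kernel start-up sequence in main(), matching
  the state checks in osKernelInitialize and osKernelStart above. app_main is a placeholder
  thread function.

    int main (void) {
      // System and clock initialization goes here...
      osKernelInitialize();                      // KernelState becomes osKernelReady
      osThreadNew(app_main, NULL, NULL);         // create the first application thread
      if (osKernelGetState() == osKernelReady) {
        osKernelStart();                         // start scheduling, does not return on success
      }
      for (;;) {}                                // only reached if the kernel failed to start
    }
*/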
321 Lock the RTOS Kernel scheduler.
323 int32_t osKernelLock (void) {
326 if (IRQ_Context() != 0U) {
327 lock = (int32_t)osErrorISR;
330 switch (xTaskGetSchedulerState()) {
331 case taskSCHEDULER_SUSPENDED:
332 /* Suspend scheduler or increment nesting level */
337 case taskSCHEDULER_RUNNING:
342 case taskSCHEDULER_NOT_STARTED:
344 lock = (int32_t)osError;
349 /* Return previous lock state */
354 Unlock the RTOS Kernel scheduler.
356 int32_t osKernelUnlock (void) {
359 if (IRQ_Context() != 0U) {
360 lock = (int32_t)osErrorISR;
363 switch (xTaskGetSchedulerState()) {
364 case taskSCHEDULER_SUSPENDED:
366 /* Resume scheduler or decrement nesting level */
367 (void)xTaskResumeAll();
370 case taskSCHEDULER_RUNNING:
374 case taskSCHEDULER_NOT_STARTED:
376 lock = (int32_t)osError;
381 /* Return previous lock state */
386 Restore the RTOS Kernel scheduler lock state.
388 int32_t osKernelRestoreLock (int32_t lock) {
390 if (IRQ_Context() != 0U) {
391 lock = (int32_t)osErrorISR;
394 switch (xTaskGetSchedulerState()) {
395 case taskSCHEDULER_SUSPENDED:
397 /* Resume scheduler or decrement nesting level */
398 (void)xTaskResumeAll();
402 lock = (int32_t)osError;
407 case taskSCHEDULER_RUNNING:
409 /* Suspend scheduler or increment nesting level */
414 lock = (int32_t)osError;
419 case taskSCHEDULER_NOT_STARTED:
421 lock = (int32_t)osError;
426 /* Return new lock state */
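/*
  Usage sketch (illustrative only): pairing osKernelLock with osKernelRestoreLock so that a
  protected section also works when called from code that already holds the scheduler lock.

    int32_t lock = osKernelLock();      // returns the previous lock state (0 or 1)
    // ... access data shared with other threads ...
    osKernelRestoreLock(lock);          // restore the state returned by osKernelLock
*/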
431 Get the RTOS kernel tick count.
433 uint32_t osKernelGetTickCount (void) {
436 if (IRQ_Context() != 0U) {
437 ticks = xTaskGetTickCountFromISR();
439 ticks = xTaskGetTickCount();
442 /* Return kernel tick count */
447 Get the RTOS kernel tick frequency.
449 uint32_t osKernelGetTickFreq (void) {
450 /* Return frequency in hertz */
451 return (configTICK_RATE_HZ);
455 Get the RTOS kernel system timer count.
457 uint32_t osKernelGetSysTimerCount (void) {
458 uint32_t irqmask = IS_IRQ_MASKED();
461 #if (configUSE_TICKLESS_IDLE != 0)
464 /* Low Power Tickless Idle controls timer overflow flag and therefore */
465 /* OS_Tick_GetOverflow may be non-functional. As a workaround a reference */
466 /* time is measured here before disabling interrupts. Timer value overflow */
467 /* is then checked by comparing reference against latest time measurement. */
/* Timer count value returned by this method is less accurate, but without */
/* this workaround a missed overflow would yield an invalid timer count.   */
470 val0 = OS_Tick_GetCount();
477 ticks = xTaskGetTickCount();
478 val = OS_Tick_GetCount();
480 /* Update tick count and timer value when timer overflows */
481 #if (configUSE_TICKLESS_IDLE != 0)
486 if (OS_Tick_GetOverflow() != 0U) {
487 val = OS_Tick_GetCount();
492 val += ticks * OS_Tick_GetInterval();
498 /* Return system timer count */
503 Get the RTOS kernel system timer frequency.
505 uint32_t osKernelGetSysTimerFreq (void) {
506 /* Return frequency in hertz */
507 return (configCPU_CLOCK_HZ);
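/*
  Usage sketch (illustrative only): measuring a short duration in microseconds with the
  system timer functions defined above (assumes the section fits within the counter range).

    uint32_t start  = osKernelGetSysTimerCount();
    // ... code section to be measured ...
    uint32_t cycles = osKernelGetSysTimerCount() - start;
    uint32_t us     = (uint32_t)(((uint64_t)cycles * 1000000U) / osKernelGetSysTimerFreq());
*/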
511 /* ==== Thread Management Functions ==== */
514 Create a thread and add it to Active Threads.
- The memory for the control block and stack must be provided in the osThreadAttr_t
  structure in order to allocate the object statically.
- The osThreadJoinable attribute is not supported; NULL is returned if it is used.
521 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
527 #if (configUSE_OS2_CPU_AFFINITY == 1)
528 UBaseType_t core_aff = tskNO_AFFINITY;
533 if ((IRQ_Context() == 0U) && (func != NULL)) {
534 stack = configMINIMAL_STACK_SIZE;
535 prio = (UBaseType_t)osPriorityNormal;
541 /* Take the name from attributes */
544 if (attr->priority != osPriorityNone) {
545 prio = (UBaseType_t)attr->priority;
548 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
549 /* Invalid priority or unsupported osThreadJoinable attribute used */
553 if (attr->stack_size > 0U) {
/* In FreeRTOS the stack size is given not in bytes but in units of sizeof(StackType_t), which is 4 on Arm ports. */
/* The stack size should therefore be 4-byte aligned to avoid side effects caused by the division.                */
556 stack = attr->stack_size / sizeof(StackType_t);
559 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
560 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
561 /* The memory for control block and stack is provided, use static object */
565 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
566 /* Control block and stack memory will be allocated from the dynamic pool */
571 #if (configUSE_OS2_CPU_AFFINITY == 1)
572 if (attr->affinity_mask != 0U) {
573 core_aff = attr->affinity_mask;
582 #if (configSUPPORT_STATIC_ALLOCATION == 1)
583 #if (configUSE_OS2_CPU_AFFINITY == 0)
584 hTask = xTaskCreateStatic ((TaskFunction_t)func,
589 (StackType_t *)attr->stack_mem,
590 (StaticTask_t *)attr->cb_mem);
592 hTask = xTaskCreateStaticAffinitySet ((TaskFunction_t)func,
597 (StackType_t *)attr->stack_mem,
598 (StaticTask_t *)attr->cb_mem,
605 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
606 #if (configUSE_OS2_CPU_AFFINITY == 0)
607 if (xTaskCreate ((TaskFunction_t )func,
609 (configSTACK_DEPTH_TYPE)stack,
616 if (xTaskCreateAffinitySet ((TaskFunction_t )func,
618 (configSTACK_DEPTH_TYPE)stack,
631 /* Return thread ID */
632 return ((osThreadId_t)hTask);
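/*
  Usage sketch (illustrative only): creating a statically allocated thread, matching the
  cb_mem/stack_mem checks above. worker_func and the object names are placeholders.

    static StaticTask_t worker_tcb;
    static uint32_t     worker_stack[256];       // stack_size below is given in bytes

    static const osThreadAttr_t worker_attr = {
      .name       = "worker",
      .cb_mem     = &worker_tcb,
      .cb_size    = sizeof(worker_tcb),
      .stack_mem  = worker_stack,
      .stack_size = sizeof(worker_stack),
      .priority   = osPriorityNormal
    };

    osThreadId_t tid = osThreadNew(worker_func, NULL, &worker_attr);
*/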
636 Get name of a thread.
638 const char *osThreadGetName (osThreadId_t thread_id) {
639 TaskHandle_t hTask = (TaskHandle_t)thread_id;
645 else if (IRQ_Context() != 0U) {
/* Retrieve the name even though calling this function from an ISR is not allowed. */
/* The underlying implementation permits it, so an exception is made here.         */
648 name = pcTaskGetName (hTask);
651 name = pcTaskGetName (hTask);
654 /* Return name as null-terminated string */
659 Return the thread ID of the current running thread.
661 osThreadId_t osThreadGetId (void) {
664 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
666 /* Return thread ID */
671 Get current thread state of a thread.
673 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
674 TaskHandle_t hTask = (TaskHandle_t)thread_id;
675 osThreadState_t state;
677 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
678 state = osThreadError;
681 switch (eTaskGetState (hTask)) {
682 case eRunning: state = osThreadRunning; break;
683 case eReady: state = osThreadReady; break;
685 case eSuspended: state = osThreadBlocked; break;
688 default: state = osThreadError; break;
692 /* Return current thread state */
Get available stack space of a thread, based on the stack watermark recorded during execution.
699 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
700 TaskHandle_t hTask = (TaskHandle_t)thread_id;
703 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
706 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
709 /* Return remaining stack space in bytes */
714 Change priority of a thread.
716 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
717 TaskHandle_t hTask = (TaskHandle_t)thread_id;
720 if (IRQ_Context() != 0U) {
723 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
724 stat = osErrorParameter;
728 vTaskPrioritySet (hTask, (UBaseType_t)priority - 1U);
731 /* Return execution status */
736 Get current priority of a thread.
738 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
739 TaskHandle_t hTask = (TaskHandle_t)thread_id;
742 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
743 prio = osPriorityError;
745 prio = (osPriority_t)(uxTaskPriorityGet (hTask) + 1U);
748 /* Return current thread priority */
753 Pass control to next thread that is in state READY.
755 osStatus_t osThreadYield (void) {
758 if (IRQ_Context() != 0U) {
765 /* Return execution status */
769 #if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
771 Suspend execution of a thread.
773 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
774 TaskHandle_t hTask = (TaskHandle_t)thread_id;
777 if (IRQ_Context() != 0U) {
780 else if (hTask == NULL) {
781 stat = osErrorParameter;
785 vTaskSuspend (hTask);
788 /* Return execution status */
793 Resume execution of a thread.
795 osStatus_t osThreadResume (osThreadId_t thread_id) {
796 TaskHandle_t hTask = (TaskHandle_t)thread_id;
800 if (IRQ_Context() != 0U) {
803 else if (hTask == NULL) {
804 stat = osErrorParameter;
807 tstate = eTaskGetState (hTask);
809 if (tstate == eSuspended) {
810 /* Thread is suspended */
814 /* Not suspended, might be blocked */
815 if (xTaskAbortDelay(hTask) == pdPASS) {
816 /* Thread was unblocked */
819 /* Thread was not blocked */
820 stat = osErrorResource;
825 /* Return execution status */
828 #endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */
831 Terminate execution of current running thread.
833 __NO_RETURN void osThreadExit (void) {
834 #ifndef USE_FreeRTOS_HEAP_1
841 Terminate execution of a thread.
843 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
844 TaskHandle_t hTask = (TaskHandle_t)thread_id;
846 #ifndef USE_FreeRTOS_HEAP_1
849 if (IRQ_Context() != 0U) {
852 else if (hTask == NULL) {
853 stat = osErrorParameter;
856 tstate = eTaskGetState (hTask);
858 if (tstate != eDeleted) {
862 stat = osErrorResource;
869 /* Return execution status */
874 Get number of active threads.
876 uint32_t osThreadGetCount (void) {
879 if (IRQ_Context() != 0U) {
882 count = uxTaskGetNumberOfTasks();
885 /* Return number of active threads */
889 #if (configUSE_OS2_THREAD_ENUMERATE == 1)
891 Enumerate active threads.
893 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
897 if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
902 /* Allocate memory on heap to temporarily store TaskStatus_t information */
903 count = uxTaskGetNumberOfTasks();
904 task = pvPortMalloc (count * sizeof(TaskStatus_t));
907 /* Retrieve task status information */
908 count = uxTaskGetSystemState (task, count, NULL);
910 /* Copy handles from task status array into provided thread array */
911 for (i = 0U; (i < count) && (i < array_items); i++) {
912 thread_array[i] = (osThreadId_t)task[i].xHandle;
916 (void)xTaskResumeAll();
921 /* Return number of enumerated threads */
924 #endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */
926 #if (configUSE_OS2_CPU_AFFINITY == 1)
928 Set processor affinity mask of a thread.
930 osStatus_t osThreadSetAffinityMask (osThreadId_t thread_id, uint32_t affinity_mask) {
931 TaskHandle_t hTask = (TaskHandle_t)thread_id;
934 if (IRQ_Context() != 0U) {
937 else if (hTask == NULL) {
938 stat = osErrorParameter;
942 vTaskCoreAffinitySet (hTask, (UBaseType_t)affinity_mask);
945 /* Return execution status */
950 Get current processor affinity mask of a thread.
952 uint32_t osThreadGetAffinityMask (osThreadId_t thread_id) {
953 TaskHandle_t hTask = (TaskHandle_t)thread_id;
954 UBaseType_t affinity_mask;
956 if (IRQ_Context() != 0U) {
959 else if (hTask == NULL) {
963 affinity_mask = vTaskCoreAffinityGet (hTask);
966 /* Return current processor affinity mask */
967 return ((uint32_t)affinity_mask);
969 #endif /* (configUSE_OS2_CPU_AFFINITY == 1) */
971 /* ==== Thread Flags Functions ==== */
973 #if (configUSE_OS2_THREAD_FLAGS == 1)
975 Set the specified Thread Flags of a thread.
977 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
978 TaskHandle_t hTask = (TaskHandle_t)thread_id;
982 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
983 rflags = (uint32_t)osErrorParameter;
986 rflags = (uint32_t)osError;
988 if (IRQ_Context() != 0U) {
991 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
992 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
994 portYIELD_FROM_ISR (yield);
997 (void)xTaskNotify (hTask, flags, eSetBits);
998 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
1001 /* Return flags after setting */
1006 Clear the specified Thread Flags of current running thread.
1008 uint32_t osThreadFlagsClear (uint32_t flags) {
1010 uint32_t rflags, cflags;
1012 if (IRQ_Context() != 0U) {
1013 rflags = (uint32_t)osErrorISR;
1015 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
1016 rflags = (uint32_t)osErrorParameter;
1019 hTask = xTaskGetCurrentTaskHandle();
1021 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
1025 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
1026 rflags = (uint32_t)osError;
1030 rflags = (uint32_t)osError;
1034 /* Return flags before clearing */
1039 Get the current Thread Flags of current running thread.
1041 uint32_t osThreadFlagsGet (void) {
1045 if (IRQ_Context() != 0U) {
1046 rflags = (uint32_t)osErrorISR;
1049 hTask = xTaskGetCurrentTaskHandle();
1051 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
1052 rflags = (uint32_t)osError;
1056 /* Return current flags */
1061 Wait for one or more Thread Flags of the current running thread to become signaled.
1063 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
1065 uint32_t rflags, nval;
1067 TickType_t t0, td, tout;
1069 BaseType_t notify = pdFALSE;
1071 if (IRQ_Context() != 0U) {
1072 rflags = (uint32_t)osErrorISR;
1074 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
1075 rflags = (uint32_t)osErrorParameter;
1078 if ((options & osFlagsNoClear) == osFlagsNoClear) {
1087 t0 = xTaskGetTickCount();
1089 rval = xTaskNotifyWait (0, clear, &nval, tout);
1091 if (rval == pdPASS) {
1095 if ((rflags & ~flags) != 0) {
1096 /* Other flags already set, notify task to change its state */
1100 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
1101 if ((flags & rflags) == flags) {
1104 if (timeout == 0U) {
1105 rflags = (uint32_t)osErrorResource;
1111 if ((flags & rflags) != 0) {
1114 if (timeout == 0U) {
1115 rflags = (uint32_t)osErrorResource;
1121 /* Update timeout */
1122 td = xTaskGetTickCount() - t0;
1127 tout = timeout - td;
1132 rflags = (uint32_t)osErrorResource;
1134 rflags = (uint32_t)osErrorTimeout;
1138 while (rval != pdFAIL);
1141 if (notify == pdTRUE) {
1142 hTask = xTaskGetCurrentTaskHandle();
1144 /* Ensure task is already notified without changing existing flags */
1145 if (xTaskNotify(hTask, 0, eNoAction) != pdPASS) {
1146 rflags = (uint32_t)osError;
1150 /* Return flags before clearing */
1153 #endif /* (configUSE_OS2_THREAD_FLAGS == 1) */
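/*
  Usage sketch (illustrative only): signalling a worker thread from an interrupt handler with
  thread flags. FLAG_RX, worker_id and the handler/thread names are placeholders.

    #define FLAG_RX 0x0001U

    extern osThreadId_t worker_id;                       // created with osThreadNew elsewhere

    void UART_IRQHandler (void) {
      (void)osThreadFlagsSet(worker_id, FLAG_RX);        // allowed from ISR context
    }

    void worker_func (void *argument) {
      (void)argument;
      for (;;) {
        uint32_t flags = osThreadFlagsWait(FLAG_RX, osFlagsWaitAny, osWaitForever);
        if ((flags & osFlagsError) == 0U) {
          // ... handle received data ...
        }
      }
    }
*/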
1156 /* ==== Generic Wait Functions ==== */
1159 Wait for Timeout (Time Delay).
1161 osStatus_t osDelay (uint32_t ticks) {
1164 if (IRQ_Context() != 0U) {
1175 /* Return execution status */
1180 Wait until specified time.
1182 osStatus_t osDelayUntil (uint32_t ticks) {
1183 TickType_t tcnt, delay;
1186 if (IRQ_Context() != 0U) {
1191 tcnt = xTaskGetTickCount();
1193 /* Determine remaining number of ticks to delay */
1194 delay = (TickType_t)ticks - tcnt;
1196 /* Check if target tick has not expired */
1197 if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
1198 if (xTaskDelayUntil (&tcnt, delay) == pdFALSE) {
1205 /* No delay or already expired */
1206 stat = osErrorParameter;
1210 /* Return execution status */
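/*
  Usage sketch (illustrative only): a fixed-rate loop built on osDelayUntil, which avoids the
  drift that accumulates when osDelay is used instead.

    uint32_t tick = osKernelGetTickCount();
    for (;;) {
      tick += 10U;                 // period of 10 kernel ticks
      osDelayUntil(tick);
      // ... periodic work ...
    }
*/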
1215 /* ==== Timer Management Functions ==== */
1217 #if (configUSE_OS2_TIMER == 1)
1219 static void TimerCallback (TimerHandle_t hTimer) {
1220 TimerCallback_t *callb;
1222 /* Retrieve pointer to callback function and argument */
1223 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1225 /* Remove dynamic allocation flag */
1226 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1228 if (callb != NULL) {
1229 callb->func (callb->arg);
1234 Create and Initialize a timer.
1236 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
1238 TimerHandle_t hTimer;
1239 TimerCallback_t *callb;
1246 if ((IRQ_Context() == 0U) && (func != NULL)) {
1250 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1251 /* Static memory allocation is available: check if memory for control block */
1252 /* is provided and if it also contains space for callback and its argument */
1253 if ((attr != NULL) && (attr->cb_mem != NULL)) {
1254 if (attr->cb_size >= (sizeof(StaticTimer_t) + sizeof(TimerCallback_t))) {
1255 callb = (TimerCallback_t *)((uint32_t)attr->cb_mem + sizeof(StaticTimer_t));
1260 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1261 /* Dynamic memory allocation is available: if memory for callback and */
1262 /* its argument is not provided, allocate it from dynamic memory pool */
1263 if (callb == NULL) {
1264 callb = (TimerCallback_t *)pvPortMalloc (sizeof(TimerCallback_t));
1266 if (callb != NULL) {
1267 /* Callback memory was allocated from dynamic pool, set flag */
1273 if (callb != NULL) {
1275 callb->arg = argument;
1277 if (type == osTimerOnce) {
1287 /* Take the name from attributes */
1290 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
1291 /* The memory for control block is provided, use static object */
1295 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1296 /* Control block will be allocated from the dynamic pool */
1304 /* Store callback memory dynamic allocation flag */
1305 callb = (TimerCallback_t *)((uint32_t)callb | callb_dyn);
The TimerCallback function is always installed as the FreeRTOS timer callback; it calls the
application-specified function with its argument, both of which are stored in the callb structure.
1311 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1312 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
1317 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1318 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
1323 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1324 if ((hTimer == NULL) && (callb != NULL) && (callb_dyn == 1U)) {
1325 /* Failed to create a timer, release allocated resources */
1326 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1334 /* Return timer ID */
1335 return ((osTimerId_t)hTimer);
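/*
  Usage sketch (illustrative only): creating and starting a periodic software timer. The
  callback and its argument are stored in the TimerCallback_t structure described above.

    static void blink_cb (void *arg) {
      (void)arg;
      // ... toggle an LED, kick a watchdog, etc. ...
    }

    osTimerId_t tim = osTimerNew(blink_cb, osTimerPeriodic, NULL, NULL);
    if (tim != NULL) {
      (void)osTimerStart(tim, 100U);   // period of 100 kernel ticks
    }
*/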
1339 Get name of a timer.
1341 const char *osTimerGetName (osTimerId_t timer_id) {
1342 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1345 if (hTimer == NULL) {
1348 else if (IRQ_Context() != 0U) {
/* Retrieve the name even though calling this function from an ISR is not allowed. */
/* The underlying implementation permits it, so an exception is made here.         */
1351 p = pcTimerGetName (hTimer);
1354 p = pcTimerGetName (hTimer);
1357 /* Return name as null-terminated string */
1362 Start or restart a timer.
1364 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
1365 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1368 if (IRQ_Context() != 0U) {
1371 else if ((hTimer == NULL) || (ticks == 0U)) {
1372 stat = osErrorParameter;
1375 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
1378 stat = osErrorResource;
1382 /* Return execution status */
1389 osStatus_t osTimerStop (osTimerId_t timer_id) {
1390 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1393 if (IRQ_Context() != 0U) {
1396 else if (hTimer == NULL) {
1397 stat = osErrorParameter;
1400 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1401 stat = osErrorResource;
1404 if (xTimerStop (hTimer, 0) == pdPASS) {
1412 /* Return execution status */
1417 Check if a timer is running.
1419 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1420 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1423 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1426 running = (uint32_t)xTimerIsTimerActive (hTimer);
1429 /* Return 0: not running, 1: running */
1436 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1437 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1439 #ifndef USE_FreeRTOS_HEAP_1
1440 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1441 TimerCallback_t *callb;
1444 if (IRQ_Context() != 0U) {
1447 else if (hTimer == NULL) {
1448 stat = osErrorParameter;
1451 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1452 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1455 if (xTimerDelete (hTimer, 0) == pdPASS) {
1456 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1457 if ((uint32_t)callb & 1U) {
1458 /* Callback memory was allocated from dynamic pool, clear flag */
1459 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1461 /* Return allocated memory to dynamic pool */
1467 stat = osErrorResource;
1474 /* Return execution status */
1477 #endif /* (configUSE_OS2_TIMER == 1) */
1480 /* ==== Event Flags Management Functions ==== */
1483 Create and Initialize an Event Flags object.
1486 - Event flags are limited to 24 bits.
1488 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1489 EventGroupHandle_t hEventGroup;
1494 if (IRQ_Context() == 0U) {
1498 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1499 /* The memory for control block is provided, use static object */
1503 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1504 /* Control block will be allocated from the dynamic pool */
1514 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1515 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1520 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1521 hEventGroup = xEventGroupCreate();
1527 /* Return event flags ID */
1528 return ((osEventFlagsId_t)hEventGroup);
1532 Set the specified Event Flags.
1535 - Event flags are limited to 24 bits.
1537 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1538 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1542 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1543 rflags = (uint32_t)osErrorParameter;
1545 else if (IRQ_Context() != 0U) {
1546 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1548 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1549 rflags = (uint32_t)osErrorResource;
1553 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1554 rflags = (uint32_t)osErrorResource;
1556 /* Retrieve bits that are already set and add flags to be set in current call */
1557 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1559 portYIELD_FROM_ISR (yield);
1564 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1567 /* Return event flags after setting */
1572 Clear the specified Event Flags.
1575 - Event flags are limited to 24 bits.
1577 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1578 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1581 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1582 rflags = (uint32_t)osErrorParameter;
1584 else if (IRQ_Context() != 0U) {
1585 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
/* Enable timers and the xTimerPendFunctionCall function to support osEventFlagsClear from ISR */
1587 rflags = (uint32_t)osErrorResource;
1589 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1591 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1592 rflags = (uint32_t)osErrorResource;
1595 /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
1596 /* Yield is required here otherwise clear operation might not execute in the right order. */
1597 /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
1598 portYIELD_FROM_ISR (pdTRUE);
1603 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1606 /* Return event flags before clearing */
1611 Get the current Event Flags.
1614 - Event flags are limited to 24 bits.
1616 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1617 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1620 if (ef_id == NULL) {
1623 else if (IRQ_Context() != 0U) {
1624 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1627 rflags = xEventGroupGetBits (hEventGroup);
1630 /* Return current event flags */
1635 Wait for one or more Event Flags to become signaled.
1638 - Event flags are limited to 24 bits.
1639 - osEventFlagsWait cannot be called from an ISR.
1641 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1642 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1643 BaseType_t wait_all;
1644 BaseType_t exit_clr;
1647 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1648 rflags = (uint32_t)osErrorParameter;
1650 else if (IRQ_Context() != 0U) {
1651 if (timeout == 0U) {
1652 /* Try semantic is not supported */
1653 rflags = (uint32_t)osErrorISR;
1655 /* Calling osEventFlagsWait from ISR with non-zero timeout is invalid */
1656 rflags = (uint32_t)osFlagsErrorParameter;
1660 if (options & osFlagsWaitAll) {
1666 if (options & osFlagsNoClear) {
1672 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1674 if (options & osFlagsWaitAll) {
1675 if ((flags & rflags) != flags) {
1677 rflags = (uint32_t)osErrorTimeout;
1679 rflags = (uint32_t)osErrorResource;
1684 if ((flags & rflags) == 0U) {
1686 rflags = (uint32_t)osErrorTimeout;
1688 rflags = (uint32_t)osErrorResource;
1694 /* Return event flags before clearing */
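/*
  Usage sketch (illustrative only): one thread waits for two event flags that are set
  independently by other contexts. Flag values and names are placeholders.

    #define EVT_DATA  0x01U
    #define EVT_TIMER 0x02U

    osEventFlagsId_t evt = osEventFlagsNew(NULL);

    // Producer side (thread, or an ISR when configUSE_OS2_EVENTFLAGS_FROM_ISR is enabled):
    (void)osEventFlagsSet(evt, EVT_DATA);

    // Consumer side: block until both flags are set, then clear them on exit.
    uint32_t flags = osEventFlagsWait(evt, EVT_DATA | EVT_TIMER, osFlagsWaitAll, osWaitForever);
*/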
1699 Delete an Event Flags object.
1701 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1702 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1705 #ifndef USE_FreeRTOS_HEAP_1
1706 if (IRQ_Context() != 0U) {
1709 else if (hEventGroup == NULL) {
1710 stat = osErrorParameter;
1714 vEventGroupDelete (hEventGroup);
1720 /* Return execution status */
1725 /* ==== Mutex Management Functions ==== */
1727 #if (configUSE_OS2_MUTEX == 1)
1729 Create and Initialize a Mutex object.
- The priority inheritance protocol is used by default; the osMutexPrioInherit attribute is ignored.
- Robust mutexes are not supported; NULL is returned if the osMutexRobust attribute is used.
1735 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1736 SemaphoreHandle_t hMutex;
1743 if (IRQ_Context() == 0U) {
1745 type = attr->attr_bits;
1750 if ((type & osMutexRecursive) == osMutexRecursive) {
1756 if ((type & osMutexRobust) != osMutexRobust) {
1760 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1761 /* The memory for control block is provided, use static object */
1765 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1766 /* Control block will be allocated from the dynamic pool */
1776 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1778 #if (configUSE_RECURSIVE_MUTEXES == 1)
1779 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1783 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1789 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1791 #if (configUSE_RECURSIVE_MUTEXES == 1)
1792 hMutex = xSemaphoreCreateRecursiveMutex ();
1795 hMutex = xSemaphoreCreateMutex ();
1801 #if (configQUEUE_REGISTRY_SIZE > 0)
1802 if (hMutex != NULL) {
1803 if ((attr != NULL) && (attr->name != NULL)) {
1804 /* Only non-NULL name objects are added to the Queue Registry */
1805 vQueueAddToRegistry (hMutex, attr->name);
1810 if ((hMutex != NULL) && (rmtx != 0U)) {
1811 /* Set LSB as 'recursive mutex flag' */
1812 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1817 /* Return mutex ID */
1818 return ((osMutexId_t)hMutex);
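/*
  Usage sketch (illustrative only): a recursive mutex protecting a resource that may be locked
  again from a nested call, matching the LSB 'recursive mutex flag' handling above.

    static const osMutexAttr_t log_mutex_attr = {
      .name      = "log_mutex",
      .attr_bits = osMutexRecursive | osMutexPrioInherit
    };

    osMutexId_t log_mutex = osMutexNew(&log_mutex_attr);

    if (osMutexAcquire(log_mutex, osWaitForever) == osOK) {
      // ... nested functions may acquire log_mutex again ...
      (void)osMutexRelease(log_mutex);
    }
*/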
1822 Acquire a Mutex or timeout if it is locked.
1824 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1825 SemaphoreHandle_t hMutex;
1829 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1831 /* Extract recursive mutex flag */
1832 rmtx = (uint32_t)mutex_id & 1U;
1836 if (IRQ_Context() != 0U) {
1839 else if (hMutex == NULL) {
1840 stat = osErrorParameter;
1844 #if (configUSE_RECURSIVE_MUTEXES == 1)
1845 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1846 if (timeout != 0U) {
1847 stat = osErrorTimeout;
1849 stat = osErrorResource;
1855 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1856 if (timeout != 0U) {
1857 stat = osErrorTimeout;
1859 stat = osErrorResource;
1865 /* Return execution status */
1870 Release a Mutex that was acquired by osMutexAcquire.
1872 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1873 SemaphoreHandle_t hMutex;
1877 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1879 /* Extract recursive mutex flag */
1880 rmtx = (uint32_t)mutex_id & 1U;
1884 if (IRQ_Context() != 0U) {
1887 else if (hMutex == NULL) {
1888 stat = osErrorParameter;
1892 #if (configUSE_RECURSIVE_MUTEXES == 1)
1893 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1894 stat = osErrorResource;
1899 if (xSemaphoreGive (hMutex) != pdPASS) {
1900 stat = osErrorResource;
1905 /* Return execution status */
1910 Get Thread which owns a Mutex object.
1912 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1913 SemaphoreHandle_t hMutex;
1916 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1918 if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
1921 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1924 /* Return owner thread ID */
1929 Delete a Mutex object.
1931 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1933 #ifndef USE_FreeRTOS_HEAP_1
1934 SemaphoreHandle_t hMutex;
1936 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1938 if (IRQ_Context() != 0U) {
1941 else if (hMutex == NULL) {
1942 stat = osErrorParameter;
1945 #if (configQUEUE_REGISTRY_SIZE > 0)
1946 vQueueUnregisterQueue (hMutex);
1949 vSemaphoreDelete (hMutex);
1955 /* Return execution status */
1958 #endif /* (configUSE_OS2_MUTEX == 1) */
1961 /* ==== Semaphore Management Functions ==== */
1964 Create and Initialize a Semaphore object.
1966 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1967 SemaphoreHandle_t hSemaphore;
1972 if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
1976 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1977 /* The memory for control block is provided, use static object */
1981 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1982 /* Control block will be allocated from the dynamic pool */
1992 if (max_count == 1U) {
1994 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1995 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1999 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2000 hSemaphore = xSemaphoreCreateBinary();
2004 if ((hSemaphore != NULL) && (initial_count != 0U)) {
2005 if (xSemaphoreGive (hSemaphore) != pdPASS) {
2006 vSemaphoreDelete (hSemaphore);
2013 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2014 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
2018 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2019 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
2024 #if (configQUEUE_REGISTRY_SIZE > 0)
2025 if (hSemaphore != NULL) {
2026 if ((attr != NULL) && (attr->name != NULL)) {
2027 /* Only non-NULL name objects are added to the Queue Registry */
2028 vQueueAddToRegistry (hSemaphore, attr->name);
2035 /* Return semaphore ID */
2036 return ((osSemaphoreId_t)hSemaphore);
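/*
  Usage sketch (illustrative only): a binary semaphore used to defer interrupt work to a
  thread. The handler and thread names are placeholders.

    static osSemaphoreId_t sem;

    void app_init (void) {
      sem = osSemaphoreNew(1U, 0U, NULL);                // binary semaphore, initially empty
    }

    void DMA_IRQHandler (void) {
      (void)osSemaphoreRelease(sem);                     // uses xSemaphoreGiveFromISR internally
    }

    void worker_func (void *argument) {
      (void)argument;
      for (;;) {
        if (osSemaphoreAcquire(sem, osWaitForever) == osOK) {
          // ... process the completed transfer ...
        }
      }
    }
*/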
2040 Acquire a Semaphore token or timeout if no tokens are available.
2042 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
2043 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2049 if (hSemaphore == NULL) {
2050 stat = osErrorParameter;
2052 else if (IRQ_Context() != 0U) {
2053 if (timeout != 0U) {
2054 stat = osErrorParameter;
2059 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
2060 stat = osErrorResource;
2062 portYIELD_FROM_ISR (yield);
2067 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
2068 if (timeout != 0U) {
2069 stat = osErrorTimeout;
2071 stat = osErrorResource;
2076 /* Return execution status */
2081 Release a Semaphore token up to the initial maximum count.
2083 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
2084 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2090 if (hSemaphore == NULL) {
2091 stat = osErrorParameter;
2093 else if (IRQ_Context() != 0U) {
2096 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
2097 stat = osErrorResource;
2099 portYIELD_FROM_ISR (yield);
2103 if (xSemaphoreGive (hSemaphore) != pdPASS) {
2104 stat = osErrorResource;
2108 /* Return execution status */
2113 Get current Semaphore token count.
2115 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
2116 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2119 if (hSemaphore == NULL) {
2122 else if (IRQ_Context() != 0U) {
2123 count = (uint32_t)uxSemaphoreGetCountFromISR (hSemaphore);
2125 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
2128 /* Return number of tokens */
2133 Delete a Semaphore object.
2135 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
2136 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2139 #ifndef USE_FreeRTOS_HEAP_1
2140 if (IRQ_Context() != 0U) {
2143 else if (hSemaphore == NULL) {
2144 stat = osErrorParameter;
2147 #if (configQUEUE_REGISTRY_SIZE > 0)
2148 vQueueUnregisterQueue (hSemaphore);
2152 vSemaphoreDelete (hSemaphore);
2158 /* Return execution status */
2163 /* ==== Message Queue Management Functions ==== */
2166 Create and Initialize a Message Queue object.
- The memory for the control block and message data must be provided in the
  osMessageQueueAttr_t structure in order to allocate the object statically.
2172 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
2173 QueueHandle_t hQueue;
2178 if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
2182 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
2183 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
2184 /* The memory for control block and message data is provided, use static object */
2188 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
2189 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
2190 /* Control block will be allocated from the dynamic pool */
2200 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2201 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
2206 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2207 hQueue = xQueueCreate (msg_count, msg_size);
2212 #if (configQUEUE_REGISTRY_SIZE > 0)
2213 if (hQueue != NULL) {
2214 if ((attr != NULL) && (attr->name != NULL)) {
2215 /* Only non-NULL name objects are added to the Queue Registry */
2216 vQueueAddToRegistry (hQueue, attr->name);
2223 /* Return message queue ID */
2224 return ((osMessageQueueId_t)hQueue);
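/*
  Usage sketch (illustrative only): passing fixed-size records through a message queue.
  The record type and names are placeholders.

    typedef struct { uint8_t cmd; uint8_t data[7]; } msg_t;

    osMessageQueueId_t mq = osMessageQueueNew(16U, sizeof(msg_t), NULL);

    // Sender (thread context; from an ISR the timeout must be 0):
    msg_t tx = { .cmd = 1U };
    (void)osMessageQueuePut(mq, &tx, 0U, 0U);

    // Receiver:
    msg_t rx;
    if (osMessageQueueGet(mq, &rx, NULL, osWaitForever) == osOK) {
      // ... handle rx ...
    }
*/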
2228 Put a Message into a Queue or timeout if Queue is full.
2231 - Message priority is ignored
2233 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
2234 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2238 (void)msg_prio; /* Message priority is ignored */
2242 if (IRQ_Context() != 0U) {
2243 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2244 stat = osErrorParameter;
2249 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
2250 stat = osErrorResource;
2252 portYIELD_FROM_ISR (yield);
2257 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2258 stat = osErrorParameter;
2261 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2262 if (timeout != 0U) {
2263 stat = osErrorTimeout;
2265 stat = osErrorResource;
2271 /* Return execution status */
2276 Get a Message from a Queue or timeout if Queue is empty.
2279 - Message priority is ignored
2281 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
2282 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2286 (void)msg_prio; /* Message priority is ignored */
2290 if (IRQ_Context() != 0U) {
2291 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2292 stat = osErrorParameter;
2297 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
2298 stat = osErrorResource;
2300 portYIELD_FROM_ISR (yield);
2305 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2306 stat = osErrorParameter;
2309 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2310 if (timeout != 0U) {
2311 stat = osErrorTimeout;
2313 stat = osErrorResource;
2319 /* Return execution status */
2324 Get maximum number of messages in a Message Queue.
2326 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
2327 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2330 if (hQueue == NULL) {
2333 capacity = uxQueueGetQueueLength (hQueue);
2336 /* Return maximum number of messages */
2341 Get maximum message size in a Message Queue.
2343 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
2344 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2347 if (hQueue == NULL) {
2350 size = uxQueueGetQueueItemSize (hQueue);
2353 /* Return maximum message size */
2358 Get number of queued messages in a Message Queue.
2360 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
2361 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2364 if (hQueue == NULL) {
2367 else if (IRQ_Context() != 0U) {
2368 count = uxQueueMessagesWaitingFromISR (hQueue);
2371 count = uxQueueMessagesWaiting (hQueue);
2374 /* Return number of queued messages */
2375 return ((uint32_t)count);
2379 Get number of available slots for messages in a Message Queue.
2381 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
2382 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2386 if (hQueue == NULL) {
2389 else if (IRQ_Context() != 0U) {
2390 isrm = taskENTER_CRITICAL_FROM_ISR();
2392 space = uxQueueGetQueueLength (hQueue) - uxQueueMessagesWaiting (hQueue);
2394 taskEXIT_CRITICAL_FROM_ISR(isrm);
2397 space = (uint32_t)uxQueueSpacesAvailable (hQueue);
2400 /* Return number of available slots */
2405 Reset a Message Queue to initial empty state.
2407 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
2408 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2411 if (IRQ_Context() != 0U) {
2414 else if (hQueue == NULL) {
2415 stat = osErrorParameter;
2419 (void)xQueueReset (hQueue);
2422 /* Return execution status */
2427 Delete a Message Queue object.
2429 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
2430 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2433 #ifndef USE_FreeRTOS_HEAP_1
2434 if (IRQ_Context() != 0U) {
2437 else if (hQueue == NULL) {
2438 stat = osErrorParameter;
2441 #if (configQUEUE_REGISTRY_SIZE > 0)
2442 vQueueUnregisterQueue (hQueue);
2446 vQueueDelete (hQueue);
2452 /* Return execution status */
2457 /* ==== Memory Pool Management Functions ==== */
2459 #ifdef FREERTOS_MPOOL_H_
2460 /* Static memory pool functions */
2461 static void FreeBlock (MemPool_t *mp, void *block);
2462 static void *AllocBlock (MemPool_t *mp);
2463 static void *CreateBlock (MemPool_t *mp);
2466 Create and Initialize a Memory Pool object.
2468 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
2471 int32_t mem_cb, mem_mp;
2474 if (IRQ_Context() != 0U) {
2477 else if ((block_count == 0U) || (block_size == 0U)) {
2482 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
2489 /* Take the name from attributes */
2492 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
2493 /* Static control block is provided */
2496 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
2497 /* Allocate control block memory on heap */
2501 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
2502 /* Allocate memory array on heap */
2506 if (attr->mp_mem != NULL) {
2507 /* Check if array is 4-byte aligned */
2508 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
2509 /* Check if array big enough */
2510 if (attr->mp_size >= sz) {
2511 /* Static memory pool array is provided */
2519 /* Attributes not provided, allocate memory on heap */
2525 mp = pvPortMalloc (sizeof(MemPool_t));
2535 /* Create a semaphore (max count == initial count == block_count) */
2536 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2537 mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
2538 #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2539 mp->sem = xSemaphoreCreateCounting (block_count, block_count);
2544 if (mp->sem != NULL) {
2545 /* Setup memory array */
2547 mp->mem_arr = pvPortMalloc (sz);
2550 mp->mem_arr = attr->mp_mem;
2556 if ((mp != NULL) && (mp->mem_arr != NULL)) {
2557 /* Memory pool can be created */
2561 mp->bl_sz = block_size;
2562 mp->bl_cnt = block_count;
2565 /* Set heap allocated memory flags */
2566 mp->status = MPOOL_STATUS;
2569 /* Control block on heap */
2573 /* Memory array on heap */
2578 /* Memory pool cannot be created, release allocated resources */
2579 if ((mem_cb == 0) && (mp != NULL)) {
2580 /* Free control block memory */
2587 /* Return memory pool ID */
2592 Get name of a Memory Pool object.
2594 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
2595 MemPool_t *mp = (osMemoryPoolId_t)mp_id;
2598 if (mp_id == NULL) {
2601 else if (IRQ_Context() != 0U) {
2608 /* Return name as null-terminated string */
2613 Allocate a memory block from a Memory Pool.
2615 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2620 if (mp_id == NULL) {
2621 /* Invalid input parameters */
2627 mp = (MemPool_t *)mp_id;
2629 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2630 if (IRQ_Context() != 0U) {
2631 if (timeout == 0U) {
2632 if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
2633 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2634 isrm = taskENTER_CRITICAL_FROM_ISR();
2636 /* Get a block from the free-list */
2637 block = AllocBlock(mp);
2639 if (block == NULL) {
2640 /* List of free blocks is empty, 'create' new block */
2641 block = CreateBlock(mp);
2644 taskEXIT_CRITICAL_FROM_ISR(isrm);
2650 if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
2651 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2652 taskENTER_CRITICAL();
2654 /* Get a block from the free-list */
2655 block = AllocBlock(mp);
2657 if (block == NULL) {
2658 /* List of free blocks is empty, 'create' new block */
2659 block = CreateBlock(mp);
2662 taskEXIT_CRITICAL();
2669 /* Return memory block address */
2674 Return an allocated memory block back to a Memory Pool.
2676 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2682 if ((mp_id == NULL) || (block == NULL)) {
2683 /* Invalid input parameters */
2684 stat = osErrorParameter;
2687 mp = (MemPool_t *)mp_id;
2689 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2690 /* Invalid object status */
2691 stat = osErrorResource;
2693 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2694 /* Block pointer outside of memory array area */
2695 stat = osErrorParameter;
2700 if (IRQ_Context() != 0U) {
2701 if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
2702 stat = osErrorResource;
2705 isrm = taskENTER_CRITICAL_FROM_ISR();
2707 /* Add block to the list of free blocks */
2708 FreeBlock(mp, block);
2710 taskEXIT_CRITICAL_FROM_ISR(isrm);
2713 xSemaphoreGiveFromISR (mp->sem, &yield);
2714 portYIELD_FROM_ISR (yield);
2718 if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
2719 stat = osErrorResource;
2722 taskENTER_CRITICAL();
2724 /* Add block to the list of free blocks */
2725 FreeBlock(mp, block);
2727 taskEXIT_CRITICAL();
2729 xSemaphoreGive (mp->sem);
2735 /* Return execution status */
2740 Get maximum number of memory blocks in a Memory Pool.
2742 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2746 if (mp_id == NULL) {
2747 /* Invalid input parameters */
2751 mp = (MemPool_t *)mp_id;
2753 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2754 /* Invalid object status */
2762 /* Return maximum number of memory blocks */
2767 Get memory block size in a Memory Pool.
2769 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2773 if (mp_id == NULL) {
2774 /* Invalid input parameters */
2778 mp = (MemPool_t *)mp_id;
2780 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2781 /* Invalid object status */
2789 /* Return memory block size in bytes */
2794 Get number of memory blocks used in a Memory Pool.
2796 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2800 if (mp_id == NULL) {
2801 /* Invalid input parameters */
2805 mp = (MemPool_t *)mp_id;
2807 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2808 /* Invalid object status */
2812 if (IRQ_Context() != 0U) {
2813 n = uxSemaphoreGetCountFromISR (mp->sem);
2815 n = uxSemaphoreGetCount (mp->sem);
2822 /* Return number of memory blocks used */
2827 Get number of memory blocks available in a Memory Pool.
2829 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2833 if (mp_id == NULL) {
2834 /* Invalid input parameters */
2838 mp = (MemPool_t *)mp_id;
2840 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2841 /* Invalid object status */
2845 if (IRQ_Context() != 0U) {
2846 n = uxSemaphoreGetCountFromISR (mp->sem);
2848 n = uxSemaphoreGetCount (mp->sem);
2853 /* Return number of memory blocks available */
2858 Delete a Memory Pool object.
2860 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2864 if (mp_id == NULL) {
2865 /* Invalid input parameters */
2866 stat = osErrorParameter;
2868 else if (IRQ_Context() != 0U) {
2872 mp = (MemPool_t *)mp_id;
2874 taskENTER_CRITICAL();
2876 /* Invalidate control block status */
2877 mp->status = mp->status & 3U;
2879 /* Wake-up tasks waiting for pool semaphore */
2880 while (xSemaphoreGive (mp->sem) == pdTRUE);
2886 if ((mp->status & 2U) != 0U) {
2887 /* Memory pool array allocated on heap */
2888 vPortFree (mp->mem_arr);
2890 if ((mp->status & 1U) != 0U) {
2891 /* Memory pool control block allocated on heap */
2895 taskEXIT_CRITICAL();
2900 /* Return execution status */
Create a new block according to the current block index.
2907 static void *CreateBlock (MemPool_t *mp) {
2908 MemPoolBlock_t *p = NULL;
2910 if (mp->n < mp->bl_cnt) {
2911 /* Unallocated blocks exist, set pointer to new block */
2912 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2914 /* Increment block index */
2922 Allocate a block by reading the list of free blocks.
2924 static void *AllocBlock (MemPool_t *mp) {
2925 MemPoolBlock_t *p = NULL;
2927 if (mp->head != NULL) {
/* A list of free blocks exists, get the head block */
2931 /* Head block is now next on the list */
Free a block by returning it to the list of free blocks.
2941 static void FreeBlock (MemPool_t *mp, void *block) {
2942 MemPoolBlock_t *p = block;
2944 /* Store current head into block memory space */
2947 /* Store current block as new head */
2950 #endif /* FREERTOS_MPOOL_H_ */
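/*
  Usage sketch (illustrative only): a fixed-size block pool used for fragmentation-free
  allocation of buffers. The buffer type and names are placeholders.

    typedef struct { uint8_t payload[64]; } netbuf_t;

    osMemoryPoolId_t pool = osMemoryPoolNew(8U, sizeof(netbuf_t), NULL);

    netbuf_t *buf = (netbuf_t *)osMemoryPoolAlloc(pool, osWaitForever);
    if (buf != NULL) {
      // ... fill and use the buffer ...
      (void)osMemoryPoolFree(pool, buf);
    }
*/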
2951 /*---------------------------------------------------------------------------*/
2953 /* Callback function prototypes */
2954 extern void vApplicationIdleHook (void);
2955 extern void vApplicationMallocFailedHook (void);
2956 extern void vApplicationDaemonTaskStartupHook (void);
2959 Dummy implementation of the callback function vApplicationIdleHook().
2961 #if (configUSE_IDLE_HOOK == 1)
2962 __WEAK void vApplicationIdleHook (void){}
2966 Dummy implementation of the callback function vApplicationTickHook().
2968 #if (configUSE_TICK_HOOK == 1)
2969 __WEAK void vApplicationTickHook (void){}
2973 Dummy implementation of the callback function vApplicationMallocFailedHook().
2975 #if (configUSE_MALLOC_FAILED_HOOK == 1)
2976 __WEAK void vApplicationMallocFailedHook (void) {
/* Assert when the malloc failed hook is enabled but no application-defined function exists */
2983 Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
2985 #if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
2986 __WEAK void vApplicationDaemonTaskStartupHook (void){}
2990 Dummy implementation of the callback function vApplicationStackOverflowHook().
2992 #if (configCHECK_FOR_STACK_OVERFLOW > 0)
2993 __WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, char *pcTaskName) {
/* Assert when stack overflow checking is enabled but no application-defined function exists */