1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
33 #include "timers.h" // ARM.FreeRTOS::RTOS:Timers
35 #include "freertos_mpool.h" // osMemoryPool definitions
36 #include "freertos_os2.h" // Configuration check and setup
38 /*---------------------------------------------------------------------------*/
39 #ifndef __ARM_ARCH_6M__
40 #define __ARM_ARCH_6M__ 0
42 #ifndef __ARM_ARCH_7M__
43 #define __ARM_ARCH_7M__ 0
45 #ifndef __ARM_ARCH_7EM__
46 #define __ARM_ARCH_7EM__ 0
48 #ifndef __ARM_ARCH_8M_MAIN__
49 #define __ARM_ARCH_8M_MAIN__ 0
51 #ifndef __ARM_ARCH_7A__
52 #define __ARM_ARCH_7A__ 0
55 #if ((__ARM_ARCH_7M__ == 1U) || \
56 (__ARM_ARCH_7EM__ == 1U) || \
57 (__ARM_ARCH_8M_MAIN__ == 1U))
58 #define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
59 #elif (__ARM_ARCH_6M__ == 1U)
60 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
61 #elif (__ARM_ARCH_7A__ == 1U)
63 #define CPSR_MASKBIT_I 0x80U
65 #define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
67 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
70 #if (__ARM_ARCH_7A__ == 1U)
71 /* CPSR mode bitmasks */
72 #define CPSR_MODE_USER 0x10U
73 #define CPSR_MODE_SYSTEM 0x1FU
75 #define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
77 #define IS_IRQ_MODE() (__get_IPSR() != 0U)
81 #define MAX_BITS_TASK_NOTIFY 31U
82 #define MAX_BITS_EVENT_GROUPS 24U
84 #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
85 #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
87 /* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
88 #define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
89 ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
90 ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
92 #define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
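/*
   Illustrative example of the encoding above (not part of the wrapper):
   FreeRTOS V10.4.6 would be reported as
     10 * 10000000 + 4 * 10000 + 6 = 100040006
   i.e. the decimal digits read as mm nnn rrrr (major.minor.rev).
*/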
94 /* Timer callback information structure definition */
100 /* Kernel initialization state */
101 static osKernelState_t KernelState = osKernelInactive;
104 Heap region definition used by the heap_5 variant
106 Define configAPPLICATION_ALLOCATED_HEAP as a nonzero value in FreeRTOSConfig.h if
107 heap regions are already defined and vPortDefineHeapRegions is called by the application.
109 Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using
110 definition configHEAP_5_REGIONS as parameter. Overriding configHEAP_5_REGIONS
111 is possible by defining it globally or in FreeRTOSConfig.h.
113 #if defined(USE_FreeRTOS_HEAP_5)
114 #if (configAPPLICATION_ALLOCATED_HEAP == 0)
116 FreeRTOS heap is not defined by the application.
117 Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
118 is provided by default. Define configHEAP_5_REGIONS to provide custom
121 #define HEAP_5_REGION_SETUP 1
123 #ifndef configHEAP_5_REGIONS
124 #define configHEAP_5_REGIONS xHeapRegions
126 static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
128 static HeapRegion_t xHeapRegions[] = {
129 { ucHeap, configTOTAL_HEAP_SIZE },
133 /* Global definition is provided to override default heap array */
134 extern HeapRegion_t configHEAP_5_REGIONS[];
138 The application already defined the array used for the FreeRTOS heap and
139 called vPortDefineHeapRegions to initialize heap.
141 #define HEAP_5_REGION_SETUP 0
142 #endif /* configAPPLICATION_ALLOCATED_HEAP */
143 #endif /* USE_FreeRTOS_HEAP_5 */
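/*
   Usage sketch (illustrative): overriding the default single-region setup of the
   heap_5 variant. AppHeapRegions, heap_sram1 and heap_sram2 are placeholder names
   and sizes; the array must be ordered by ascending start address and terminated
   with a { NULL, 0 } entry.

     In FreeRTOSConfig.h:
       #define configHEAP_5_REGIONS   AppHeapRegions

     In application code:
       static uint8_t heap_sram1[16 * 1024];
       static uint8_t heap_sram2[32 * 1024];

       HeapRegion_t AppHeapRegions[] = {
         { heap_sram1, sizeof(heap_sram1) },
         { heap_sram2, sizeof(heap_sram2) },
         { NULL,       0                  }
       };

   osKernelInitialize then passes AppHeapRegions to vPortDefineHeapRegions.
*/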
146 #undef SysTick_Handler
148 /* CMSIS SysTick interrupt handler prototype */
149 extern void SysTick_Handler (void);
150 /* FreeRTOS tick timer interrupt handler prototype */
151 extern void xPortSysTickHandler (void);
154 SysTick handler implementation that also clears the overflow flag.
156 void SysTick_Handler (void) {
157 #if (configUSE_TICKLESS_IDLE == 0)
158 /* Clear overflow flag */
162 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
163 /* Call tick handler */
164 xPortSysTickHandler();
170 Setup SVC to reset value.
172 __STATIC_INLINE void SVC_Setup (void) {
173 #if (__ARM_ARCH_7A__ == 0U)
174 /* Service Call interrupt might be configured before kernel start */
175 /* and when its priority is lower than or equal to BASEPRI, the SVC */
176 /* instruction causes a Hard Fault. */
177 NVIC_SetPriority (SVCall_IRQn, 0U);
182 Function macro used to retrieve semaphore count from ISR
184 #ifndef uxSemaphoreGetCountFromISR
185 #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
189 Determine if CPU executes from interrupt context or if interrupts are masked.
191 __STATIC_INLINE uint32_t IRQ_Context (void) {
198 /* Called from interrupt context */
202 /* Get FreeRTOS scheduler state */
203 state = xTaskGetSchedulerState();
205 if (state != taskSCHEDULER_NOT_STARTED) {
206 /* Scheduler was started */
207 if (IS_IRQ_MASKED()) {
208 /* Interrupts are masked */
214 /* Return context, 0: thread context, 1: IRQ context */
219 /* ==== Kernel Management Functions ==== */
222 Initialize the RTOS Kernel.
224 osStatus_t osKernelInitialize (void) {
228 if (IRQ_Context() != 0U) {
232 state = xTaskGetSchedulerState();
234 /* Initialize if scheduler not started and not initialized before */
235 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelInactive)) {
236 #if defined(USE_TRACE_EVENT_RECORDER)
237 /* Initialize the trace macro debugging output channel */
238 EvrFreeRTOSSetup(0U);
240 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
241 /* Initialize the memory regions when using heap_5 variant */
242 vPortDefineHeapRegions (configHEAP_5_REGIONS);
244 KernelState = osKernelReady;
251 /* Return execution status */
256 Get RTOS Kernel Information.
258 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
260 if (version != NULL) {
261 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
262 version->api = KERNEL_VERSION;
263 version->kernel = KERNEL_VERSION;
266 if ((id_buf != NULL) && (id_size != 0U)) {
267 /* Buffer for retrieving identification string is provided */
268 if (id_size > sizeof(KERNEL_ID)) {
269 id_size = sizeof(KERNEL_ID);
271 /* Copy kernel identification string into provided buffer */
272 memcpy(id_buf, KERNEL_ID, id_size);
275 /* Return execution status */
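/*
   Usage sketch (illustrative): querying the kernel version and identification
   string. print_kernel_info is a placeholder name.

     void print_kernel_info (void) {
       osVersion_t ver;
       char        id[32];

       if (osKernelGetInfo (&ver, id, sizeof(id)) == osOK) {
         // ver.kernel holds e.g. 100040006, id holds e.g. "FreeRTOS V10.4.6"
       }
     }
*/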
280 Get the current RTOS Kernel state.
282 osKernelState_t osKernelGetState (void) {
283 osKernelState_t state;
285 switch (xTaskGetSchedulerState()) {
286 case taskSCHEDULER_RUNNING:
287 state = osKernelRunning;
290 case taskSCHEDULER_SUSPENDED:
291 state = osKernelLocked;
294 case taskSCHEDULER_NOT_STARTED:
296 if (KernelState == osKernelReady) {
297 /* Ready, osKernelInitialize was already called */
298 state = osKernelReady;
300 /* Not initialized */
301 state = osKernelInactive;
306 /* Return current state */
311 Start the RTOS Kernel scheduler.
313 osStatus_t osKernelStart (void) {
317 if (IRQ_Context() != 0U) {
321 state = xTaskGetSchedulerState();
323 /* Start scheduler if initialized and not started before */
324 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelReady)) {
325 /* Ensure SVC priority is at the reset value */
327 /* Change state to ensure correct API flow */
328 KernelState = osKernelRunning;
329 /* Start the kernel scheduler */
330 vTaskStartScheduler();
337 /* Return execution status */
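/*
   Usage sketch (illustrative): typical kernel startup sequence in main().
   app_main is a placeholder application thread function.

     void app_main (void *argument);

     int main (void) {
       // System and clock initialization goes here
       osKernelInitialize ();                     // kernel enters osKernelReady
       osThreadNew (app_main, NULL, NULL);        // create application thread(s)
       if (osKernelGetState () == osKernelReady) {
         osKernelStart ();                        // start the scheduler, does not return
       }
       for (;;) {}                                // only reached on error
     }
*/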
342 Lock the RTOS Kernel scheduler.
344 int32_t osKernelLock (void) {
347 if (IRQ_Context() != 0U) {
348 lock = (int32_t)osErrorISR;
351 switch (xTaskGetSchedulerState()) {
352 case taskSCHEDULER_SUSPENDED:
356 case taskSCHEDULER_RUNNING:
361 case taskSCHEDULER_NOT_STARTED:
363 lock = (int32_t)osError;
368 /* Return previous lock state */
373 Unlock the RTOS Kernel scheduler.
375 int32_t osKernelUnlock (void) {
378 if (IRQ_Context() != 0U) {
379 lock = (int32_t)osErrorISR;
382 switch (xTaskGetSchedulerState()) {
383 case taskSCHEDULER_SUSPENDED:
386 if (xTaskResumeAll() != pdTRUE) {
387 if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
388 lock = (int32_t)osError;
393 case taskSCHEDULER_RUNNING:
397 case taskSCHEDULER_NOT_STARTED:
399 lock = (int32_t)osError;
404 /* Return previous lock state */
409 Restore the RTOS Kernel scheduler lock state.
411 int32_t osKernelRestoreLock (int32_t lock) {
413 if (IRQ_Context() != 0U) {
414 lock = (int32_t)osErrorISR;
417 switch (xTaskGetSchedulerState()) {
418 case taskSCHEDULER_SUSPENDED:
419 case taskSCHEDULER_RUNNING:
425 lock = (int32_t)osError;
428 if (xTaskResumeAll() != pdTRUE) {
429 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
430 lock = (int32_t)osError;
437 case taskSCHEDULER_NOT_STARTED:
439 lock = (int32_t)osError;
444 /* Return new lock state */
449 Get the RTOS kernel tick count.
451 uint32_t osKernelGetTickCount (void) {
454 if (IRQ_Context() != 0U) {
455 ticks = xTaskGetTickCountFromISR();
457 ticks = xTaskGetTickCount();
460 /* Return kernel tick count */
465 Get the RTOS kernel tick frequency.
467 uint32_t osKernelGetTickFreq (void) {
468 /* Return frequency in hertz */
469 return (configTICK_RATE_HZ);
473 Get the RTOS kernel system timer count.
475 uint32_t osKernelGetSysTimerCount (void) {
476 uint32_t irqmask = IS_IRQ_MASKED();
479 #if (configUSE_TICKLESS_IDLE != 0)
482 /* Low Power Tickless Idle controls timer overflow flag and therefore */
483 /* OS_Tick_GetOverflow may be non-functional. As a workaround a reference */
484 /* time is measured here before disabling interrupts. Timer value overflow */
485 /* is then checked by comparing reference against latest time measurement. */
486 /* The timer count returned by this method is less accurate, but without the */
487 /* workaround a missed overflow would result in an invalid timer count. */
488 val0 = OS_Tick_GetCount();
495 ticks = xTaskGetTickCount();
496 val = OS_Tick_GetCount();
498 /* Update tick count and timer value when timer overflows */
499 #if (configUSE_TICKLESS_IDLE != 0)
504 if (OS_Tick_GetOverflow() != 0U) {
505 val = OS_Tick_GetCount();
510 val += ticks * OS_Tick_GetInterval();
516 /* Return system timer count */
521 Get the RTOS kernel system timer frequency.
523 uint32_t osKernelGetSysTimerFreq (void) {
524 /* Return frequency in hertz */
525 return (configCPU_CLOCK_HZ);
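/*
   Usage sketch (illustrative): measuring a short duration in microseconds with
   the system timer. measure_us is a placeholder; the measured interval is
   assumed to be short enough that the 32-bit count wraps at most once.

     uint32_t measure_us (void) {
       uint32_t t0   = osKernelGetSysTimerCount ();
       // ... code section to be measured ...
       uint32_t t1   = osKernelGetSysTimerCount ();
       uint32_t freq = osKernelGetSysTimerFreq ();              // counts per second
       return (uint32_t)(((uint64_t)(t1 - t0) * 1000000U) / freq);
     }
*/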
529 /* ==== Thread Management Functions ==== */
532 Create a thread and add it to Active Threads.
535 - The memory for the control block and stack must be provided in the osThreadAttr_t
536 structure in order to allocate the object statically (see the usage sketch after this function).
537 - Attribute osThreadJoinable is not supported; NULL is returned if it is used.
539 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
548 if ((IRQ_Context() == 0U) && (func != NULL)) {
549 stack = configMINIMAL_STACK_SIZE;
550 prio = (UBaseType_t)osPriorityNormal;
556 if (attr->name != NULL) {
559 if (attr->priority != osPriorityNone) {
560 prio = (UBaseType_t)attr->priority;
563 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
564 /* Invalid priority or unsupported osThreadJoinable attribute used */
568 if (attr->stack_size > 0U) {
569 /* In FreeRTOS the stack size is given not in bytes but in units of sizeof(StackType_t), which is 4 on ARM ports. */
570 /* The stack size should therefore be 4-byte aligned to avoid side effects from the division below. */
571 stack = attr->stack_size / sizeof(StackType_t);
574 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
575 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
576 /* The memory for control block and stack is provided, use static object */
580 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
581 /* Control block and stack memory will be allocated from the dynamic pool */
591 #if (configSUPPORT_STATIC_ALLOCATION == 1)
592 hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem,
593 (StaticTask_t *)attr->cb_mem);
598 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
599 if (xTaskCreate ((TaskFunction_t)func, name, (configSTACK_DEPTH_TYPE)stack, argument, prio, &hTask) != pdPASS) {
607 /* Return thread ID */
608 return ((osThreadId_t)hTask);
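/*
   Usage sketch (illustrative): creating a thread with a statically allocated
   control block and stack, as described in the notes above. worker_func,
   thread_init and the stack size are placeholders.

     void worker_func (void *argument);

     static StaticTask_t worker_tcb;
     static StackType_t  worker_stack[256];

     static const osThreadAttr_t worker_attr = {
       .name       = "worker",
       .cb_mem     = &worker_tcb,
       .cb_size    = sizeof(worker_tcb),
       .stack_mem  = worker_stack,
       .stack_size = sizeof(worker_stack),        // in bytes
       .priority   = osPriorityNormal
     };

     void thread_init (void) {
       osThreadId_t tid = osThreadNew (worker_func, NULL, &worker_attr);
       if (tid == NULL) {
         // creation failed (invalid attributes or out of memory)
       }
     }
*/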
612 Get name of a thread.
614 const char *osThreadGetName (osThreadId_t thread_id) {
615 TaskHandle_t hTask = (TaskHandle_t)thread_id;
618 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
621 name = pcTaskGetName (hTask);
624 /* Return name as null-terminated string */
629 Return the thread ID of the current running thread.
631 osThreadId_t osThreadGetId (void) {
634 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
636 /* Return thread ID */
641 Get current thread state of a thread.
643 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
644 TaskHandle_t hTask = (TaskHandle_t)thread_id;
645 osThreadState_t state;
647 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
648 state = osThreadError;
651 switch (eTaskGetState (hTask)) {
652 case eRunning: state = osThreadRunning; break;
653 case eReady: state = osThreadReady; break;
655 case eSuspended: state = osThreadBlocked; break;
658 default: state = osThreadError; break;
662 /* Return current thread state */
667 Get available stack space of a thread, based on the stack watermark recorded during execution.
669 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
670 TaskHandle_t hTask = (TaskHandle_t)thread_id;
673 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
676 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
679 /* Return remaining stack space in bytes */
684 Change priority of a thread.
686 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
687 TaskHandle_t hTask = (TaskHandle_t)thread_id;
690 if (IRQ_Context() != 0U) {
693 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
694 stat = osErrorParameter;
698 vTaskPrioritySet (hTask, (UBaseType_t)priority);
701 /* Return execution status */
706 Get current priority of a thread.
708 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
709 TaskHandle_t hTask = (TaskHandle_t)thread_id;
712 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
713 prio = osPriorityError;
715 prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
718 /* Return current thread priority */
723 Pass control to next thread that is in state READY.
725 osStatus_t osThreadYield (void) {
728 if (IRQ_Context() != 0U) {
735 /* Return execution status */
739 #if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
741 Suspend execution of a thread.
743 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
744 TaskHandle_t hTask = (TaskHandle_t)thread_id;
747 if (IRQ_Context() != 0U) {
750 else if (hTask == NULL) {
751 stat = osErrorParameter;
755 vTaskSuspend (hTask);
758 /* Return execution status */
763 Resume execution of a thread.
765 osStatus_t osThreadResume (osThreadId_t thread_id) {
766 TaskHandle_t hTask = (TaskHandle_t)thread_id;
769 if (IRQ_Context() != 0U) {
772 else if (hTask == NULL) {
773 stat = osErrorParameter;
780 /* Return execution status */
783 #endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */
786 Terminate execution of current running thread.
788 __NO_RETURN void osThreadExit (void) {
789 #ifndef USE_FreeRTOS_HEAP_1
796 Terminate execution of a thread.
798 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
799 TaskHandle_t hTask = (TaskHandle_t)thread_id;
801 #ifndef USE_FreeRTOS_HEAP_1
804 if (IRQ_Context() != 0U) {
807 else if (hTask == NULL) {
808 stat = osErrorParameter;
811 tstate = eTaskGetState (hTask);
813 if (tstate != eDeleted) {
817 stat = osErrorResource;
824 /* Return execution status */
829 Get number of active threads.
831 uint32_t osThreadGetCount (void) {
834 if (IRQ_Context() != 0U) {
837 count = uxTaskGetNumberOfTasks();
840 /* Return number of active threads */
844 #if (configUSE_OS2_THREAD_ENUMERATE == 1)
846 Enumerate active threads.
848 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
852 if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
857 /* Allocate memory on heap to temporarily store TaskStatus_t information */
858 count = uxTaskGetNumberOfTasks();
859 task = pvPortMalloc (count * sizeof(TaskStatus_t));
862 /* Retrieve task status information */
863 count = uxTaskGetSystemState (task, count, NULL);
865 /* Copy handles from task status array into provided thread array */
866 for (i = 0U; (i < count) && (i < array_items); i++) {
867 thread_array[i] = (osThreadId_t)task[i].xHandle;
871 (void)xTaskResumeAll();
876 /* Return number of enumerated threads */
879 #endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */
882 /* ==== Thread Flags Functions ==== */
884 #if (configUSE_OS2_THREAD_FLAGS == 1)
886 Set the specified Thread Flags of a thread.
888 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
889 TaskHandle_t hTask = (TaskHandle_t)thread_id;
893 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
894 rflags = (uint32_t)osErrorParameter;
897 rflags = (uint32_t)osError;
899 if (IRQ_Context() != 0U) {
902 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
903 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
905 portYIELD_FROM_ISR (yield);
908 (void)xTaskNotify (hTask, flags, eSetBits);
909 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
912 /* Return flags after setting */
917 Clear the specified Thread Flags of current running thread.
919 uint32_t osThreadFlagsClear (uint32_t flags) {
921 uint32_t rflags, cflags;
923 if (IRQ_Context() != 0U) {
924 rflags = (uint32_t)osErrorISR;
926 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
927 rflags = (uint32_t)osErrorParameter;
930 hTask = xTaskGetCurrentTaskHandle();
932 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
936 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
937 rflags = (uint32_t)osError;
941 rflags = (uint32_t)osError;
945 /* Return flags before clearing */
950 Get the current Thread Flags of current running thread.
952 uint32_t osThreadFlagsGet (void) {
956 if (IRQ_Context() != 0U) {
957 rflags = (uint32_t)osErrorISR;
960 hTask = xTaskGetCurrentTaskHandle();
962 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
963 rflags = (uint32_t)osError;
967 /* Return current flags */
972 Wait for one or more Thread Flags of the current running thread to become signaled.
974 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
975 uint32_t rflags, nval;
977 TickType_t t0, td, tout;
980 if (IRQ_Context() != 0U) {
981 rflags = (uint32_t)osErrorISR;
983 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
984 rflags = (uint32_t)osErrorParameter;
987 if ((options & osFlagsNoClear) == osFlagsNoClear) {
996 t0 = xTaskGetTickCount();
998 rval = xTaskNotifyWait (0, clear, &nval, tout);
1000 if (rval == pdPASS) {
1004 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
1005 if ((flags & rflags) == flags) {
1008 if (timeout == 0U) {
1009 rflags = (uint32_t)osErrorResource;
1015 if ((flags & rflags) != 0) {
1018 if (timeout == 0U) {
1019 rflags = (uint32_t)osErrorResource;
1025 /* Update timeout */
1026 td = xTaskGetTickCount() - t0;
1031 tout = timeout - td;
1036 rflags = (uint32_t)osErrorResource;
1038 rflags = (uint32_t)osErrorTimeout;
1042 while (rval != pdFAIL);
1045 /* Return flags before clearing */
1048 #endif /* (configUSE_OS2_THREAD_FLAGS == 1) */
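/*
   Usage sketch (illustrative): signalling a worker thread with thread flags.
   FLAG_DATA_READY, worker_tid and worker_func are placeholders; error codes
   returned by the flags functions have the most significant bit set.

     #define FLAG_DATA_READY  0x0001U

     // Producer side (another thread or an ISR):
     //   osThreadFlagsSet (worker_tid, FLAG_DATA_READY);

     // Consumer side, inside the worker thread:
     void worker_func (void *argument) {
       for (;;) {
         uint32_t flags = osThreadFlagsWait (FLAG_DATA_READY, osFlagsWaitAny, osWaitForever);
         if ((flags & 0x80000000U) == 0U) {
           // FLAG_DATA_READY was signalled and has been cleared automatically
         }
       }
     }
*/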
1051 /* ==== Generic Wait Functions ==== */
1054 Wait for Timeout (Time Delay).
1056 osStatus_t osDelay (uint32_t ticks) {
1059 if (IRQ_Context() != 0U) {
1070 /* Return execution status */
1075 Wait until specified time.
1077 osStatus_t osDelayUntil (uint32_t ticks) {
1078 TickType_t tcnt, delay;
1081 if (IRQ_Context() != 0U) {
1086 tcnt = xTaskGetTickCount();
1088 /* Determine remaining number of ticks to delay */
1089 delay = (TickType_t)ticks - tcnt;
1091 /* Check if target tick has not expired */
1092 if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
1093 if (xTaskDelayUntil (&tcnt, delay) == pdFALSE) {
1100 /* No delay or already expired */
1101 stat = osErrorParameter;
1105 /* Return execution status */
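/*
   Usage sketch (illustrative): a drift-free periodic loop built on osDelayUntil.
   periodic_thread and the period value are placeholders.

     void periodic_thread (void *argument) {
       uint32_t tick = osKernelGetTickCount ();
       const uint32_t period = 100U;              // period in kernel ticks

       for (;;) {
         // ... periodic work ...
         tick += period;
         osDelayUntil (tick);                     // wake up at an absolute tick value
       }
     }
*/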
1110 /* ==== Timer Management Functions ==== */
1112 #if (configUSE_OS2_TIMER == 1)
1114 static void TimerCallback (TimerHandle_t hTimer) {
1115 TimerCallback_t *callb;
1117 /* Retrieve pointer to callback function and argument */
1118 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1120 /* Remove dynamic allocation flag */
1121 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1123 if (callb != NULL) {
1124 callb->func (callb->arg);
1129 Create and Initialize a timer.
1131 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
1133 TimerHandle_t hTimer;
1134 TimerCallback_t *callb;
1141 if ((IRQ_Context() == 0U) && (func != NULL)) {
1145 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1146 /* Static memory allocation is available: check if memory for control block */
1147 /* is provided and if it also contains space for callback and its argument */
1148 if ((attr != NULL) && (attr->cb_mem != NULL)) {
1149 if (attr->cb_size >= (sizeof(StaticTimer_t) + sizeof(TimerCallback_t))) {
1150 callb = (TimerCallback_t *)((uint32_t)attr->cb_mem + sizeof(StaticTimer_t));
1155 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1156 /* Dynamic memory allocation is available: if memory for callback and */
1157 /* its argument is not provided, allocate it from dynamic memory pool */
1158 if (callb == NULL) {
1159 callb = (TimerCallback_t *)pvPortMalloc (sizeof(TimerCallback_t));
1161 if (callb != NULL) {
1162 /* Callback memory was allocated from dynamic pool, set flag */
1168 if (callb != NULL) {
1170 callb->arg = argument;
1172 if (type == osTimerOnce) {
1182 if (attr->name != NULL) {
1186 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
1187 /* The memory for control block is provided, use static object */
1191 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1192 /* Control block will be allocated from the dynamic pool */
1200 /* Store callback memory dynamic allocation flag */
1201 callb = (TimerCallback_t *)((uint32_t)callb | callb_dyn);
1203 TimerCallback is always installed as the FreeRTOS timer callback; it calls the application-
1204 specified function with its argument, both of which are stored in the structure callb.
1207 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1208 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
1213 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1214 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
1219 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1220 if ((hTimer == NULL) && (callb != NULL) && (callb_dyn == 1U)) {
1221 /* Failed to create a timer, release allocated resources */
1222 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1230 /* Return timer ID */
1231 return ((osTimerId_t)hTimer);
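/*
   Usage sketch (illustrative): creating and starting a periodic software timer.
   blink_cb and timer_init are placeholders; the callback runs in the timer
   service (daemon) task and must not block.

     static void blink_cb (void *argument) {
       // toggle an LED, post an event, etc.
     }

     void timer_init (void) {
       osTimerId_t tim = osTimerNew (blink_cb, osTimerPeriodic, NULL, NULL);
       if (tim != NULL) {
         osTimerStart (tim, 500U);                // period of 500 kernel ticks
       }
     }
*/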
1235 Get name of a timer.
1237 const char *osTimerGetName (osTimerId_t timer_id) {
1238 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1241 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1244 p = pcTimerGetName (hTimer);
1247 /* Return name as null-terminated string */
1252 Start or restart a timer.
1254 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
1255 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1258 if (IRQ_Context() != 0U) {
1261 else if ((hTimer == NULL) || (ticks == 0U)) {
1262 stat = osErrorParameter;
1265 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
1268 stat = osErrorResource;
1272 /* Return execution status */
1279 osStatus_t osTimerStop (osTimerId_t timer_id) {
1280 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1283 if (IRQ_Context() != 0U) {
1286 else if (hTimer == NULL) {
1287 stat = osErrorParameter;
1290 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1291 stat = osErrorResource;
1294 if (xTimerStop (hTimer, 0) == pdPASS) {
1302 /* Return execution status */
1307 Check if a timer is running.
1309 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1310 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1313 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1316 running = (uint32_t)xTimerIsTimerActive (hTimer);
1319 /* Return 0: not running, 1: running */
1326 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1327 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1329 #ifndef USE_FreeRTOS_HEAP_1
1330 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1331 TimerCallback_t *callb;
1334 if (IRQ_Context() != 0U) {
1337 else if (hTimer == NULL) {
1338 stat = osErrorParameter;
1341 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1342 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1345 if (xTimerDelete (hTimer, 0) == pdPASS) {
1346 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1347 if ((uint32_t)callb & 1U) {
1348 /* Callback memory was allocated from dynamic pool, clear flag */
1349 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1351 /* Return allocated memory to dynamic pool */
1357 stat = osErrorResource;
1364 /* Return execution status */
1367 #endif /* (configUSE_OS2_TIMER == 1) */
1370 /* ==== Event Flags Management Functions ==== */
1373 Create and Initialize an Event Flags object.
1376 - Event flags are limited to 24 bits.
1378 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1379 EventGroupHandle_t hEventGroup;
1384 if (IRQ_Context() == 0U) {
1388 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1389 /* The memory for control block is provided, use static object */
1393 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1394 /* Control block will be allocated from the dynamic pool */
1404 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1405 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1410 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1411 hEventGroup = xEventGroupCreate();
1417 /* Return event flags ID */
1418 return ((osEventFlagsId_t)hEventGroup);
1422 Set the specified Event Flags.
1425 - Event flags are limited to 24 bits.
1427 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1428 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1432 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1433 rflags = (uint32_t)osErrorParameter;
1435 else if (IRQ_Context() != 0U) {
1436 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1438 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1439 rflags = (uint32_t)osErrorResource;
1443 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1444 rflags = (uint32_t)osErrorResource;
1446 /* Retrieve bits that are already set and add flags to be set in current call */
1447 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1449 portYIELD_FROM_ISR (yield);
1454 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1457 /* Return event flags after setting */
1462 Clear the specified Event Flags.
1465 - Event flags are limited to 24 bits.
1467 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1468 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1471 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1472 rflags = (uint32_t)osErrorParameter;
1474 else if (IRQ_Context() != 0U) {
1475 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1476 /* Enable timers and the xTimerPendFunctionCall function to support osEventFlagsClear from ISR */
1477 rflags = (uint32_t)osErrorResource;
1479 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1481 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1482 rflags = (uint32_t)osErrorResource;
1485 /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
1486 /* Yield is required here otherwise clear operation might not execute in the right order. */
1487 /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
1488 portYIELD_FROM_ISR (pdTRUE);
1493 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1496 /* Return event flags before clearing */
1501 Get the current Event Flags.
1504 - Event flags are limited to 24 bits.
1506 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1507 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1510 if (ef_id == NULL) {
1513 else if (IRQ_Context() != 0U) {
1514 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1517 rflags = xEventGroupGetBits (hEventGroup);
1520 /* Return current event flags */
1525 Wait for one or more Event Flags to become signaled.
1528 - Event flags are limited to 24 bits.
1529 - osEventFlagsWait cannot be called from an ISR.
1531 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1532 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1533 BaseType_t wait_all;
1534 BaseType_t exit_clr;
1537 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1538 rflags = (uint32_t)osErrorParameter;
1540 else if (IRQ_Context() != 0U) {
1541 if (timeout == 0U) {
1542 /* Try semantic is not supported */
1543 rflags = (uint32_t)osErrorISR;
1545 /* Calling osEventFlagsWait from ISR with non-zero timeout is invalid */
1546 rflags = (uint32_t)osFlagsErrorParameter;
1550 if (options & osFlagsWaitAll) {
1556 if (options & osFlagsNoClear) {
1562 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1564 if (options & osFlagsWaitAll) {
1565 if ((flags & rflags) != flags) {
1567 rflags = (uint32_t)osErrorTimeout;
1569 rflags = (uint32_t)osErrorResource;
1574 if ((flags & rflags) == 0U) {
1576 rflags = (uint32_t)osErrorTimeout;
1578 rflags = (uint32_t)osErrorResource;
1584 /* Return event flags before clearing */
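/*
   Usage sketch (illustrative): waiting for two event flags set by different
   sources. EVT_RX_DONE, EVT_TX_DONE, evt and waiter_thread are placeholder
   names; the flag values lie within the 24 available bits.

     #define EVT_RX_DONE  0x0001U
     #define EVT_TX_DONE  0x0002U

     static osEventFlagsId_t evt;                 // evt = osEventFlagsNew (NULL); during init

     // Signalling side (thread context, or ISR if configUSE_OS2_EVENTFLAGS_FROM_ISR is enabled):
     //   osEventFlagsSet (evt, EVT_RX_DONE);

     // Waiting side (thread context only):
     void waiter_thread (void *argument) {
       for (;;) {
         uint32_t flags = osEventFlagsWait (evt, EVT_RX_DONE | EVT_TX_DONE,
                                            osFlagsWaitAll, osWaitForever);
         if ((flags & 0x80000000U) == 0U) {
           // both flags were set; they are cleared on return by default
         }
       }
     }
*/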
1589 Delete an Event Flags object.
1591 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1592 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1595 #ifndef USE_FreeRTOS_HEAP_1
1596 if (IRQ_Context() != 0U) {
1599 else if (hEventGroup == NULL) {
1600 stat = osErrorParameter;
1604 vEventGroupDelete (hEventGroup);
1610 /* Return execution status */
1615 /* ==== Mutex Management Functions ==== */
1617 #if (configUSE_OS2_MUTEX == 1)
1619 Create and Initialize a Mutex object.
1622 - The priority inheritance protocol is always used; the osMutexPrioInherit attribute is ignored.
1623 - Robust mutexes are not supported; NULL is returned if osMutexRobust is used.
1625 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1626 SemaphoreHandle_t hMutex;
1633 if (IRQ_Context() == 0U) {
1635 type = attr->attr_bits;
1640 if ((type & osMutexRecursive) == osMutexRecursive) {
1646 if ((type & osMutexRobust) != osMutexRobust) {
1650 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1651 /* The memory for control block is provided, use static object */
1655 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1656 /* Control block will be allocated from the dynamic pool */
1666 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1668 #if (configUSE_RECURSIVE_MUTEXES == 1)
1669 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1673 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1679 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1681 #if (configUSE_RECURSIVE_MUTEXES == 1)
1682 hMutex = xSemaphoreCreateRecursiveMutex ();
1685 hMutex = xSemaphoreCreateMutex ();
1691 #if (configQUEUE_REGISTRY_SIZE > 0)
1692 if (hMutex != NULL) {
1693 if ((attr != NULL) && (attr->name != NULL)) {
1694 /* Only non-NULL name objects are added to the Queue Registry */
1695 vQueueAddToRegistry (hMutex, attr->name);
1700 if ((hMutex != NULL) && (rmtx != 0U)) {
1701 /* Set LSB as 'recursive mutex flag' */
1702 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1707 /* Return mutex ID */
1708 return ((osMutexId_t)hMutex);
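/*
   Usage sketch (illustrative): a recursive mutex protecting a shared resource.
   lock, lock_attr and update_resource are placeholder names.

     static const osMutexAttr_t lock_attr = {
       .name      = "res_lock",
       .attr_bits = osMutexRecursive | osMutexPrioInherit
     };
     static osMutexId_t lock;                     // lock = osMutexNew (&lock_attr); during init

     void update_resource (void) {
       if (osMutexAcquire (lock, osWaitForever) == osOK) {
         // ... access the shared resource ...
         osMutexRelease (lock);
       }
     }
*/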
1712 Acquire a Mutex or timeout if it is locked.
1714 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1715 SemaphoreHandle_t hMutex;
1719 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1721 /* Extract recursive mutex flag */
1722 rmtx = (uint32_t)mutex_id & 1U;
1726 if (IRQ_Context() != 0U) {
1729 else if (hMutex == NULL) {
1730 stat = osErrorParameter;
1734 #if (configUSE_RECURSIVE_MUTEXES == 1)
1735 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1736 if (timeout != 0U) {
1737 stat = osErrorTimeout;
1739 stat = osErrorResource;
1745 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1746 if (timeout != 0U) {
1747 stat = osErrorTimeout;
1749 stat = osErrorResource;
1755 /* Return execution status */
1760 Release a Mutex that was acquired by osMutexAcquire.
1762 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1763 SemaphoreHandle_t hMutex;
1767 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1769 /* Extract recursive mutex flag */
1770 rmtx = (uint32_t)mutex_id & 1U;
1774 if (IRQ_Context() != 0U) {
1777 else if (hMutex == NULL) {
1778 stat = osErrorParameter;
1782 #if (configUSE_RECURSIVE_MUTEXES == 1)
1783 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1784 stat = osErrorResource;
1789 if (xSemaphoreGive (hMutex) != pdPASS) {
1790 stat = osErrorResource;
1795 /* Return execution status */
1800 Get Thread which owns a Mutex object.
1802 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1803 SemaphoreHandle_t hMutex;
1806 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1808 if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
1811 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1814 /* Return owner thread ID */
1819 Delete a Mutex object.
1821 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1823 #ifndef USE_FreeRTOS_HEAP_1
1824 SemaphoreHandle_t hMutex;
1826 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1828 if (IRQ_Context() != 0U) {
1831 else if (hMutex == NULL) {
1832 stat = osErrorParameter;
1835 #if (configQUEUE_REGISTRY_SIZE > 0)
1836 vQueueUnregisterQueue (hMutex);
1839 vSemaphoreDelete (hMutex);
1845 /* Return execution status */
1848 #endif /* (configUSE_OS2_MUTEX == 1) */
1851 /* ==== Semaphore Management Functions ==== */
1854 Create and Initialize a Semaphore object.
1856 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1857 SemaphoreHandle_t hSemaphore;
1862 if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
1866 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1867 /* The memory for control block is provided, use static object */
1871 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1872 /* Control block will be allocated from the dynamic pool */
1882 if (max_count == 1U) {
1884 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1885 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1889 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1890 hSemaphore = xSemaphoreCreateBinary();
1894 if ((hSemaphore != NULL) && (initial_count != 0U)) {
1895 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1896 vSemaphoreDelete (hSemaphore);
1903 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1904 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
1908 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1909 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
1914 #if (configQUEUE_REGISTRY_SIZE > 0)
1915 if (hSemaphore != NULL) {
1916 if ((attr != NULL) && (attr->name != NULL)) {
1917 /* Only non-NULL name objects are added to the Queue Registry */
1918 vQueueAddToRegistry (hSemaphore, attr->name);
1925 /* Return semaphore ID */
1926 return ((osSemaphoreId_t)hSemaphore);
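/*
   Usage sketch (illustrative): a binary semaphore used to signal a thread from
   an interrupt handler. sem_init, MyPeripheral_IRQHandler and handler_thread
   are placeholder names.

     static osSemaphoreId_t sig;

     void sem_init (void) {
       sig = osSemaphoreNew (1U, 0U, NULL);       // binary semaphore, initially empty
     }

     void MyPeripheral_IRQHandler (void) {
       // ... clear the peripheral interrupt source ...
       osSemaphoreRelease (sig);                  // wake the handler thread
     }

     void handler_thread (void *argument) {
       for (;;) {
         if (osSemaphoreAcquire (sig, osWaitForever) == osOK) {
           // ... process the event ...
         }
       }
     }
*/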
1930 Acquire a Semaphore token or timeout if no tokens are available.
1932 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
1933 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1939 if (hSemaphore == NULL) {
1940 stat = osErrorParameter;
1942 else if (IRQ_Context() != 0U) {
1943 if (timeout != 0U) {
1944 stat = osErrorParameter;
1949 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
1950 stat = osErrorResource;
1952 portYIELD_FROM_ISR (yield);
1957 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
1958 if (timeout != 0U) {
1959 stat = osErrorTimeout;
1961 stat = osErrorResource;
1966 /* Return execution status */
1971 Release a Semaphore token up to the initial maximum count.
1973 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
1974 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1980 if (hSemaphore == NULL) {
1981 stat = osErrorParameter;
1983 else if (IRQ_Context() != 0U) {
1986 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
1987 stat = osErrorResource;
1989 portYIELD_FROM_ISR (yield);
1993 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1994 stat = osErrorResource;
1998 /* Return execution status */
2003 Get current Semaphore token count.
2005 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
2006 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2009 if (hSemaphore == NULL) {
2012 else if (IRQ_Context() != 0U) {
2013 count = (uint32_t)uxSemaphoreGetCountFromISR (hSemaphore);
2015 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
2018 /* Return number of tokens */
2023 Delete a Semaphore object.
2025 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
2026 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
2029 #ifndef USE_FreeRTOS_HEAP_1
2030 if (IRQ_Context() != 0U) {
2033 else if (hSemaphore == NULL) {
2034 stat = osErrorParameter;
2037 #if (configQUEUE_REGISTRY_SIZE > 0)
2038 vQueueUnregisterQueue (hSemaphore);
2042 vSemaphoreDelete (hSemaphore);
2048 /* Return execution status */
2053 /* ==== Message Queue Management Functions ==== */
2056 Create and Initialize a Message Queue object.
2059 - The memory for the control block and message data must be provided in the
2060 osMessageQueueAttr_t structure in order to allocate the object statically.
2062 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
2063 QueueHandle_t hQueue;
2068 if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
2072 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
2073 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
2074 /* The memory for control block and message data is provided, use static object */
2078 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
2079 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
2080 /* Control block will be allocated from the dynamic pool */
2090 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2091 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
2096 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2097 hQueue = xQueueCreate (msg_count, msg_size);
2102 #if (configQUEUE_REGISTRY_SIZE > 0)
2103 if (hQueue != NULL) {
2104 if ((attr != NULL) && (attr->name != NULL)) {
2105 /* Only non-NULL name objects are added to the Queue Registry */
2106 vQueueAddToRegistry (hQueue, attr->name);
2113 /* Return message queue ID */
2114 return ((osMessageQueueId_t)hQueue);
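/*
   Usage sketch (illustrative): a queue of fixed-size messages between threads.
   msg_t, mq, sender and receiver_thread are placeholder names.

     typedef struct { uint8_t cmd; uint8_t data[7]; } msg_t;

     static osMessageQueueId_t mq;                // mq = osMessageQueueNew (16U, sizeof(msg_t), NULL);

     void sender (void) {
       msg_t tx = { .cmd = 1U };
       osMessageQueuePut (mq, &tx, 0U, 0U);       // priority is ignored, do not wait if full
     }

     void receiver_thread (void *argument) {
       msg_t rx;
       for (;;) {
         if (osMessageQueueGet (mq, &rx, NULL, osWaitForever) == osOK) {
           // rx holds a copy of the oldest queued message
         }
       }
     }
*/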
2118 Put a Message into a Queue or timeout if Queue is full.
2121 - Message priority is ignored
2123 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
2124 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2128 (void)msg_prio; /* Message priority is ignored */
2132 if (IRQ_Context() != 0U) {
2133 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2134 stat = osErrorParameter;
2139 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
2140 stat = osErrorResource;
2142 portYIELD_FROM_ISR (yield);
2147 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2148 stat = osErrorParameter;
2151 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2152 if (timeout != 0U) {
2153 stat = osErrorTimeout;
2155 stat = osErrorResource;
2161 /* Return execution status */
2166 Get a Message from a Queue or timeout if Queue is empty.
2169 - Message priority is ignored
2171 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
2172 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2176 (void)msg_prio; /* Message priority is ignored */
2180 if (IRQ_Context() != 0U) {
2181 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2182 stat = osErrorParameter;
2187 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
2188 stat = osErrorResource;
2190 portYIELD_FROM_ISR (yield);
2195 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2196 stat = osErrorParameter;
2199 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2200 if (timeout != 0U) {
2201 stat = osErrorTimeout;
2203 stat = osErrorResource;
2209 /* Return execution status */
2214 Get maximum number of messages in a Message Queue.
2216 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
2217 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2223 /* capacity = pxQueue->uxLength */
2224 capacity = mq->uxDummy4[1];
2227 /* Return maximum number of messages */
2232 Get maximum message size in a Message Queue.
2234 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
2235 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2241 /* size = pxQueue->uxItemSize */
2242 size = mq->uxDummy4[2];
2245 /* Return maximum message size */
2250 Get number of queued messages in a Message Queue.
2252 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
2253 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2256 if (hQueue == NULL) {
2259 else if (IRQ_Context() != 0U) {
2260 count = uxQueueMessagesWaitingFromISR (hQueue);
2263 count = uxQueueMessagesWaiting (hQueue);
2266 /* Return number of queued messages */
2267 return ((uint32_t)count);
2271 Get number of available slots for messages in a Message Queue.
2273 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
2274 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2281 else if (IRQ_Context() != 0U) {
2282 isrm = taskENTER_CRITICAL_FROM_ISR();
2284 /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
2285 space = mq->uxDummy4[1] - mq->uxDummy4[0];
2287 taskEXIT_CRITICAL_FROM_ISR(isrm);
2290 space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
2293 /* Return number of available slots */
2298 Reset a Message Queue to initial empty state.
2300 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
2301 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2304 if (IRQ_Context() != 0U) {
2307 else if (hQueue == NULL) {
2308 stat = osErrorParameter;
2312 (void)xQueueReset (hQueue);
2315 /* Return execution status */
2320 Delete a Message Queue object.
2322 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
2323 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2326 #ifndef USE_FreeRTOS_HEAP_1
2327 if (IRQ_Context() != 0U) {
2330 else if (hQueue == NULL) {
2331 stat = osErrorParameter;
2334 #if (configQUEUE_REGISTRY_SIZE > 0)
2335 vQueueUnregisterQueue (hQueue);
2339 vQueueDelete (hQueue);
2345 /* Return execution status */
2350 /* ==== Memory Pool Management Functions ==== */
2352 #ifdef FREERTOS_MPOOL_H_
2353 /* Static memory pool functions */
2354 static void FreeBlock (MemPool_t *mp, void *block);
2355 static void *AllocBlock (MemPool_t *mp);
2356 static void *CreateBlock (MemPool_t *mp);
2359 Create and Initialize a Memory Pool object.
2361 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
2364 int32_t mem_cb, mem_mp;
2367 if (IRQ_Context() != 0U) {
2370 else if ((block_count == 0U) || (block_size == 0U)) {
2375 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
2382 if (attr->name != NULL) {
2386 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
2387 /* Static control block is provided */
2390 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
2391 /* Allocate control block memory on heap */
2395 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
2396 /* Allocate memory array on heap */
2400 if (attr->mp_mem != NULL) {
2401 /* Check if array is 4-byte aligned */
2402 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
2403 /* Check if array big enough */
2404 if (attr->mp_size >= sz) {
2405 /* Static memory pool array is provided */
2413 /* Attributes not provided, allocate memory on heap */
2419 mp = pvPortMalloc (sizeof(MemPool_t));
2425 /* Create a semaphore (max count == initial count == block_count) */
2426 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2427 mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
2428 #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2429 mp->sem = xSemaphoreCreateCounting (block_count, block_count);
2434 if (mp->sem != NULL) {
2435 /* Setup memory array */
2437 mp->mem_arr = pvPortMalloc (sz);
2439 mp->mem_arr = attr->mp_mem;
2444 if ((mp != NULL) && (mp->mem_arr != NULL)) {
2445 /* Memory pool can be created */
2449 mp->bl_sz = block_size;
2450 mp->bl_cnt = block_count;
2453 /* Set heap allocated memory flags */
2454 mp->status = MPOOL_STATUS;
2457 /* Control block on heap */
2461 /* Memory array on heap */
2466 /* Memory pool cannot be created, release allocated resources */
2467 if ((mem_cb == 0) && (mp != NULL)) {
2468 /* Free control block memory */
2475 /* Return memory pool ID */
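/*
   Usage sketch (illustrative): a fixed-size block pool, e.g. for I/O buffers.
   pool, process and the block count/size are placeholders.

     static osMemoryPoolId_t pool;                // pool = osMemoryPoolNew (8U, 64U, NULL); during init

     void process (void) {
       void *buf = osMemoryPoolAlloc (pool, osWaitForever);    // blocks until a block is free
       if (buf != NULL) {
         // ... fill and use the 64-byte block ...
         osMemoryPoolFree (pool, buf);                         // return the block to the pool
       }
     }
*/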
2480 Get name of a Memory Pool object.
2482 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
2483 MemPool_t *mp = (MemPool_t *)mp_id;
2486 if (IRQ_Context() != 0U) {
2489 else if (mp_id == NULL) {
2496 /* Return name as null-terminated string */
2501 Allocate a memory block from a Memory Pool.
2503 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2508 if (mp_id == NULL) {
2509 /* Invalid input parameters */
2515 mp = (MemPool_t *)mp_id;
2517 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2518 if (IRQ_Context() != 0U) {
2519 if (timeout == 0U) {
2520 if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
2521 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2522 isrm = taskENTER_CRITICAL_FROM_ISR();
2524 /* Get a block from the free-list */
2525 block = AllocBlock(mp);
2527 if (block == NULL) {
2528 /* List of free blocks is empty, 'create' new block */
2529 block = CreateBlock(mp);
2532 taskEXIT_CRITICAL_FROM_ISR(isrm);
2538 if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
2539 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2540 taskENTER_CRITICAL();
2542 /* Get a block from the free-list */
2543 block = AllocBlock(mp);
2545 if (block == NULL) {
2546 /* List of free blocks is empty, 'create' new block */
2547 block = CreateBlock(mp);
2550 taskEXIT_CRITICAL();
2557 /* Return memory block address */
2562 Return an allocated memory block back to a Memory Pool.
2564 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2570 if ((mp_id == NULL) || (block == NULL)) {
2571 /* Invalid input parameters */
2572 stat = osErrorParameter;
2575 mp = (MemPool_t *)mp_id;
2577 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2578 /* Invalid object status */
2579 stat = osErrorResource;
2581 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2582 /* Block pointer outside of memory array area */
2583 stat = osErrorParameter;
2588 if (IRQ_Context() != 0U) {
2589 if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
2590 stat = osErrorResource;
2593 isrm = taskENTER_CRITICAL_FROM_ISR();
2595 /* Add block to the list of free blocks */
2596 FreeBlock(mp, block);
2598 taskEXIT_CRITICAL_FROM_ISR(isrm);
2601 xSemaphoreGiveFromISR (mp->sem, &yield);
2602 portYIELD_FROM_ISR (yield);
2606 if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
2607 stat = osErrorResource;
2610 taskENTER_CRITICAL();
2612 /* Add block to the list of free blocks */
2613 FreeBlock(mp, block);
2615 taskEXIT_CRITICAL();
2617 xSemaphoreGive (mp->sem);
2623 /* Return execution status */
2628 Get maximum number of memory blocks in a Memory Pool.
2630 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2634 if (mp_id == NULL) {
2635 /* Invalid input parameters */
2639 mp = (MemPool_t *)mp_id;
2641 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2642 /* Invalid object status */
2650 /* Return maximum number of memory blocks */
2655 Get memory block size in a Memory Pool.
2657 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2661 if (mp_id == NULL) {
2662 /* Invalid input parameters */
2666 mp = (MemPool_t *)mp_id;
2668 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2669 /* Invalid object status */
2677 /* Return memory block size in bytes */
2682 Get number of memory blocks used in a Memory Pool.
2684 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2688 if (mp_id == NULL) {
2689 /* Invalid input parameters */
2693 mp = (MemPool_t *)mp_id;
2695 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2696 /* Invalid object status */
2700 if (IRQ_Context() != 0U) {
2701 n = uxSemaphoreGetCountFromISR (mp->sem);
2703 n = uxSemaphoreGetCount (mp->sem);
2710 /* Return number of memory blocks used */
2715 Get number of memory blocks available in a Memory Pool.
2717 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2721 if (mp_id == NULL) {
2722 /* Invalid input parameters */
2726 mp = (MemPool_t *)mp_id;
2728 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2729 /* Invalid object status */
2733 if (IRQ_Context() != 0U) {
2734 n = uxSemaphoreGetCountFromISR (mp->sem);
2736 n = uxSemaphoreGetCount (mp->sem);
2741 /* Return number of memory blocks available */
2746 Delete a Memory Pool object.
2748 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2752 if (mp_id == NULL) {
2753 /* Invalid input parameters */
2754 stat = osErrorParameter;
2756 else if (IRQ_Context() != 0U) {
2760 mp = (MemPool_t *)mp_id;
2762 taskENTER_CRITICAL();
2764 /* Invalidate control block status */
2765 mp->status = mp->status & 3U;
2767 /* Wake-up tasks waiting for pool semaphore */
2768 while (xSemaphoreGive (mp->sem) == pdTRUE);
2774 if ((mp->status & 2U) != 0U) {
2775 /* Memory pool array allocated on heap */
2776 vPortFree (mp->mem_arr);
2778 if ((mp->status & 1U) != 0U) {
2779 /* Memory pool control block allocated on heap */
2783 taskEXIT_CRITICAL();
2788 /* Return execution status */
2793 Create a new block according to the current block index.
2795 static void *CreateBlock (MemPool_t *mp) {
2796 MemPoolBlock_t *p = NULL;
2798 if (mp->n < mp->bl_cnt) {
2799 /* Unallocated blocks exist, set pointer to new block */
2800 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2802 /* Increment block index */
2810 Allocate a block by reading the list of free blocks.
2812 static void *AllocBlock (MemPool_t *mp) {
2813 MemPoolBlock_t *p = NULL;
2815 if (mp->head != NULL) {
2816 /* List of free blocks exists, get the head block */
2819 /* Head block is now next on the list */
2827 Free a block by putting it back on the list of free blocks.
2829 static void FreeBlock (MemPool_t *mp, void *block) {
2830 MemPoolBlock_t *p = block;
2832 /* Store current head into block memory space */
2835 /* Store current block as new head */
2838 #endif /* FREERTOS_MPOOL_H_ */
2839 /*---------------------------------------------------------------------------*/
2841 /* Callback function prototypes */
2842 extern void vApplicationIdleHook (void);
2843 extern void vApplicationMallocFailedHook (void);
2844 extern void vApplicationDaemonTaskStartupHook (void);
2847 Dummy implementation of the callback function vApplicationIdleHook().
2849 #if (configUSE_IDLE_HOOK == 1)
2850 __WEAK void vApplicationIdleHook (void){}
2854 Dummy implementation of the callback function vApplicationTickHook().
2856 #if (configUSE_TICK_HOOK == 1)
2857 __WEAK void vApplicationTickHook (void){}
2861 Dummy implementation of the callback function vApplicationMallocFailedHook().
2863 #if (configUSE_MALLOC_FAILED_HOOK == 1)
2864 __WEAK void vApplicationMallocFailedHook (void) {
2865 /* Assert when malloc failed hook is enabled but no application defined function exists */
2871 Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
2873 #if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
2874 __WEAK void vApplicationDaemonTaskStartupHook (void){}
2878 Dummy implementation of the callback function vApplicationStackOverflowHook().
2880 #if (configCHECK_FOR_STACK_OVERFLOW > 0)
2881 __WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, char *pcTaskName) {
2885 /* Assert when stack overflow is enabled but no application defined function exists */
2890 /*---------------------------------------------------------------------------*/
2891 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2893 vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2894 is set to 1 and is required for static memory allocation support.
2896 __WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
2897 /* Idle task control block and stack */
2898 static StaticTask_t Idle_TCB;
2899 static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
2901 *ppxIdleTaskTCBBuffer = &Idle_TCB;
2902 *ppxIdleTaskStackBuffer = &Idle_Stack[0];
2903 *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
2907 vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2908 is set to 1 and is required for static memory allocation support.
2910 __WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
2911 /* Timer task control block and stack */
2912 static StaticTask_t Timer_TCB;
2913 static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
2915 *ppxTimerTaskTCBBuffer = &Timer_TCB;
2916 *ppxTimerTaskStackBuffer = &Timer_Stack[0];
2917 *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;