1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
33 #include "timers.h" // ARM.FreeRTOS::RTOS:Timers
35 #include "freertos_mpool.h" // osMemoryPool definitions
36 #include "freertos_os2.h" // Configuration check and setup
38 /*---------------------------------------------------------------------------*/
39 #ifndef __ARM_ARCH_6M__
40 #define __ARM_ARCH_6M__ 0
42 #ifndef __ARM_ARCH_7M__
43 #define __ARM_ARCH_7M__ 0
45 #ifndef __ARM_ARCH_7EM__
46 #define __ARM_ARCH_7EM__ 0
48 #ifndef __ARM_ARCH_8M_MAIN__
49 #define __ARM_ARCH_8M_MAIN__ 0
51 #ifndef __ARM_ARCH_7A__
52 #define __ARM_ARCH_7A__ 0
55 #if ((__ARM_ARCH_7M__ == 1U) || \
56 (__ARM_ARCH_7EM__ == 1U) || \
57 (__ARM_ARCH_8M_MAIN__ == 1U))
58 #define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
59 #elif (__ARM_ARCH_6M__ == 1U)
60 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
61 #elif (__ARM_ARCH_7A__ == 1U)
63 #define CPSR_MASKBIT_I 0x80U
65 #define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
67 #define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
70 #if (__ARM_ARCH_7A__ == 1U)
71 /* CPSR mode bitmasks */
72 #define CPSR_MODE_USER 0x10U
73 #define CPSR_MODE_SYSTEM 0x1FU
75 #define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
77 #define IS_IRQ_MODE() (__get_IPSR() != 0U)
81 #define MAX_BITS_TASK_NOTIFY 31U
82 #define MAX_BITS_EVENT_GROUPS 24U
84 #define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY) - 1U))
85 #define EVENT_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))
87 /* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
88 #define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
89 ((uint32_t)tskKERNEL_VERSION_MINOR * 10000UL) | \
90 ((uint32_t)tskKERNEL_VERSION_BUILD * 1UL))
92 #define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
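/* Worked example of the encoding above (illustrative): FreeRTOS kernel version
   10.4.3 is reported as 10*10000000 + 4*10000 + 3 = 100040003, i.e.
   "mm" = 10, "nnn" = 004, "rrrr" = 0003. */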
94 /* Timer callback information structure definition */
100 /* Kernel initialization state */
101 static osKernelState_t KernelState = osKernelInactive;
104 Heap region definition used by heap_5 variant
106 Define configAPPLICATION_ALLOCATED_HEAP as a nonzero value in FreeRTOSConfig.h if
107 the heap regions are already defined and vPortDefineHeapRegions is called by the application.
109 Otherwise vPortDefineHeapRegions will be called by osKernelInitialize with
110 configHEAP_5_REGIONS as its parameter. configHEAP_5_REGIONS can be overridden
111 by defining it globally or in FreeRTOSConfig.h.
113 #if defined(USE_FreeRTOS_HEAP_5)
114 #if (configAPPLICATION_ALLOCATED_HEAP == 0)
116 The FreeRTOS heap is not defined by the application.
117 A single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
118 is provided by default. Define configHEAP_5_REGIONS to provide custom heap regions.
121 #define HEAP_5_REGION_SETUP 1
123 #ifndef configHEAP_5_REGIONS
124 #define configHEAP_5_REGIONS xHeapRegions
126 static uint8_t ucHeap[configTOTAL_HEAP_SIZE];
128 static HeapRegion_t xHeapRegions[] = {
129 { ucHeap, configTOTAL_HEAP_SIZE },
133 /* Global definition is provided to override default heap array */
134 extern HeapRegion_t configHEAP_5_REGIONS[];
138 The application already defined the array used for the FreeRTOS heap and
139 called vPortDefineHeapRegions to initialize heap.
141 #define HEAP_5_REGION_SETUP 0
142 #endif /* configAPPLICATION_ALLOCATED_HEAP */
143 #endif /* USE_FreeRTOS_HEAP_5 */
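/*
  Illustrative application-side override of the default heap_5 setup (a sketch,
  not part of this wrapper). The buffer sizes and the names ucHeapSram1,
  ucHeapSram2 and xAppHeapRegions are hypothetical. The regions must be listed
  in ascending address order and terminated with a { NULL, 0 } entry, as
  required by vPortDefineHeapRegions.

    // In FreeRTOSConfig.h:
    //   #define configHEAP_5_REGIONS   xAppHeapRegions

    // In application code:
    static uint8_t ucHeapSram1[16 * 1024];
    static uint8_t ucHeapSram2[32 * 1024];

    HeapRegion_t xAppHeapRegions[] = {
      { ucHeapSram1, sizeof(ucHeapSram1) },
      { ucHeapSram2, sizeof(ucHeapSram2) },
      { NULL,        0                   }
    };
*/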
146 #undef SysTick_Handler
148 /* CMSIS SysTick interrupt handler prototype */
149 extern void SysTick_Handler (void);
150 /* FreeRTOS tick timer interrupt handler prototype */
151 extern void xPortSysTickHandler (void);
154 SysTick handler implementation that also clears overflow flag.
156 void SysTick_Handler (void) {
157 /* Clear overflow flag */
160 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
161 /* Call tick handler */
162 xPortSysTickHandler();
168 Setup SVC to reset value.
170 __STATIC_INLINE void SVC_Setup (void) {
171 #if (__ARM_ARCH_7A__ == 0U)
172 /* Service Call interrupt might be configured before kernel start */
173 /* and when its priority is lower than or equal to BASEPRI, the SVC instruction */
174 /* causes a Hard Fault. */
175 NVIC_SetPriority (SVCall_IRQn, 0U);
180 Function macro used to retrieve semaphore count from ISR
182 #ifndef uxSemaphoreGetCountFromISR
183 #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
187 Determine if CPU executes from interrupt context or if interrupts are masked.
189 __STATIC_INLINE uint32_t IRQ_Context (void) {
196 /* Called from interrupt context */
200 /* Get FreeRTOS scheduler state */
201 state = xTaskGetSchedulerState();
203 if (state != taskSCHEDULER_NOT_STARTED) {
204 /* Scheduler was started */
205 if (IS_IRQ_MASKED()) {
206 /* Interrupts are masked */
212 /* Return context, 0: thread context, 1: IRQ context */
217 /* ==== Kernel Management Functions ==== */
220 Initialize the RTOS Kernel.
222 osStatus_t osKernelInitialize (void) {
226 if (IRQ_Context() != 0U) {
230 state = xTaskGetSchedulerState();
232 /* Initialize if scheduler not started and not initialized before */
233 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelInactive)) {
234 #if defined(USE_TRACE_EVENT_RECORDER)
235 /* Initialize the trace macro debugging output channel */
236 EvrFreeRTOSSetup(0U);
238 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
239 /* Initialize the memory regions when using heap_5 variant */
240 vPortDefineHeapRegions (configHEAP_5_REGIONS);
242 KernelState = osKernelReady;
249 /* Return execution status */
254 Get RTOS Kernel Information.
256 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
258 if (version != NULL) {
259 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
260 version->api = KERNEL_VERSION;
261 version->kernel = KERNEL_VERSION;
264 if ((id_buf != NULL) && (id_size != 0U)) {
265 /* Buffer for retrieving identification string is provided */
266 if (id_size > sizeof(KERNEL_ID)) {
267 id_size = sizeof(KERNEL_ID);
269 /* Copy kernel identification string into provided buffer */
270 memcpy(id_buf, KERNEL_ID, id_size);
273 /* Return execution status */
278 Get the current RTOS Kernel state.
280 osKernelState_t osKernelGetState (void) {
281 osKernelState_t state;
283 switch (xTaskGetSchedulerState()) {
284 case taskSCHEDULER_RUNNING:
285 state = osKernelRunning;
288 case taskSCHEDULER_SUSPENDED:
289 state = osKernelLocked;
292 case taskSCHEDULER_NOT_STARTED:
294 if (KernelState == osKernelReady) {
295 /* Ready, osKernelInitialize was already called */
296 state = osKernelReady;
298 /* Not initialized */
299 state = osKernelInactive;
304 /* Return current state */
309 Start the RTOS Kernel scheduler.
311 osStatus_t osKernelStart (void) {
315 if (IRQ_Context() != 0U) {
319 state = xTaskGetSchedulerState();
321 /* Start scheduler if initialized and not started before */
322 if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelReady)) {
323 /* Ensure SVC priority is at the reset value */
325 /* Change state to ensure correct API flow */
326 KernelState = osKernelRunning;
327 /* Start the kernel scheduler */
328 vTaskStartScheduler();
335 /* Return execution status */
340 Lock the RTOS Kernel scheduler.
342 int32_t osKernelLock (void) {
345 if (IRQ_Context() != 0U) {
346 lock = (int32_t)osErrorISR;
349 switch (xTaskGetSchedulerState()) {
350 case taskSCHEDULER_SUSPENDED:
354 case taskSCHEDULER_RUNNING:
359 case taskSCHEDULER_NOT_STARTED:
361 lock = (int32_t)osError;
366 /* Return previous lock state */
371 Unlock the RTOS Kernel scheduler.
373 int32_t osKernelUnlock (void) {
376 if (IRQ_Context() != 0U) {
377 lock = (int32_t)osErrorISR;
380 switch (xTaskGetSchedulerState()) {
381 case taskSCHEDULER_SUSPENDED:
384 if (xTaskResumeAll() != pdTRUE) {
385 if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
386 lock = (int32_t)osError;
391 case taskSCHEDULER_RUNNING:
395 case taskSCHEDULER_NOT_STARTED:
397 lock = (int32_t)osError;
402 /* Return previous lock state */
407 Restore the RTOS Kernel scheduler lock state.
409 int32_t osKernelRestoreLock (int32_t lock) {
411 if (IRQ_Context() != 0U) {
412 lock = (int32_t)osErrorISR;
415 switch (xTaskGetSchedulerState()) {
416 case taskSCHEDULER_SUSPENDED:
417 case taskSCHEDULER_RUNNING:
423 lock = (int32_t)osError;
426 if (xTaskResumeAll() != pdTRUE) {
427 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
428 lock = (int32_t)osError;
435 case taskSCHEDULER_NOT_STARTED:
437 lock = (int32_t)osError;
442 /* Return new lock state */
447 Get the RTOS kernel tick count.
449 uint32_t osKernelGetTickCount (void) {
452 if (IRQ_Context() != 0U) {
453 ticks = xTaskGetTickCountFromISR();
455 ticks = xTaskGetTickCount();
458 /* Return kernel tick count */
463 Get the RTOS kernel tick frequency.
465 uint32_t osKernelGetTickFreq (void) {
466 /* Return frequency in hertz */
467 return (configTICK_RATE_HZ);
471 Get the RTOS kernel system timer count.
473 uint32_t osKernelGetSysTimerCount (void) {
474 uint32_t irqmask = IS_IRQ_MASKED();
480 ticks = xTaskGetTickCount();
481 val = OS_Tick_GetCount();
483 /* Update tick count and timer value when timer overflows */
484 if (OS_Tick_GetOverflow() != 0U) {
485 val = OS_Tick_GetCount();
488 val += ticks * OS_Tick_GetInterval();
494 /* Return system timer count */
499 Get the RTOS kernel system timer frequency.
501 uint32_t osKernelGetSysTimerFreq (void) {
502 /* Return frequency in hertz */
503 return (configCPU_CLOCK_HZ);
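/*
  Illustrative kernel start-up sequence (application side, not part of this
  wrapper). The thread function app_main and its use of default attributes are
  hypothetical; clock and device initialization is assumed to be done by the
  startup code before main is entered.

    #include "cmsis_os2.h"

    static void app_main (void *argument) {
      (void)argument;
      for (;;) {
        osDelay(1000U);                             // block for 1000 kernel ticks
      }
    }

    int main (void) {
      osKernelInitialize();                         // initialize CMSIS-RTOS2
      osThreadNew(app_main, NULL, NULL);            // create application thread
      if (osKernelGetState() == osKernelReady) {
        osKernelStart();                            // start scheduling (does not return)
      }
      for (;;) {}                                   // only reached on error
    }
*/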
507 /* ==== Thread Management Functions ==== */
510 Create a thread and add it to Active Threads.
513 - The memory for the control block and stack must be provided in the osThreadAttr_t
514 structure in order to allocate the object statically.
515 - The attribute osThreadJoinable is not supported; NULL is returned if it is used.
517 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
526 if ((IRQ_Context() == 0U) && (func != NULL)) {
527 stack = configMINIMAL_STACK_SIZE;
528 prio = (UBaseType_t)osPriorityNormal;
534 if (attr->name != NULL) {
537 if (attr->priority != osPriorityNone) {
538 prio = (UBaseType_t)attr->priority;
541 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
542 /* Invalid priority or unsupported osThreadJoinable attribute used */
546 if (attr->stack_size > 0U) {
547 /* In FreeRTOS the stack depth is given not in bytes but in units of sizeof(StackType_t), which is 4 on ARM ports. */
548 /* The stack size should therefore be a multiple of 4 bytes so that the division below does not truncate. */
549 stack = attr->stack_size / sizeof(StackType_t);
552 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
553 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
554 /* The memory for control block and stack is provided, use static object */
558 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
559 /* Control block and stack memory will be allocated from the dynamic pool */
569 #if (configSUPPORT_STATIC_ALLOCATION == 1)
570 hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem,
571 (StaticTask_t *)attr->cb_mem);
576 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
577 if (xTaskCreate ((TaskFunction_t)func, name, (uint16_t)stack, argument, prio, &hTask) != pdPASS) {
585 /* Return thread ID */
586 return ((osThreadId_t)hTask);
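/*
  Illustrative static allocation of a thread (a sketch; Worker_Thread,
  worker_tcb, worker_stack and the attribute values are hypothetical). Both
  cb_mem/cb_size and stack_mem/stack_size must be provided for the object to
  be allocated statically, as described above; stack_size is given in bytes.

    static void Worker_Thread (void *argument);      // hypothetical thread function

    static StaticTask_t worker_tcb;
    static StackType_t  worker_stack[256];

    static const osThreadAttr_t worker_attr = {
      .name       = "Worker",
      .cb_mem     = &worker_tcb,
      .cb_size    = sizeof(worker_tcb),
      .stack_mem  = worker_stack,
      .stack_size = sizeof(worker_stack),
      .priority   = osPriorityNormal
    };

    osThreadId_t tid = osThreadNew(Worker_Thread, NULL, &worker_attr);
*/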
590 Get name of a thread.
592 const char *osThreadGetName (osThreadId_t thread_id) {
593 TaskHandle_t hTask = (TaskHandle_t)thread_id;
596 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
599 name = pcTaskGetName (hTask);
602 /* Return name as null-terminated string */
607 Return the thread ID of the current running thread.
609 osThreadId_t osThreadGetId (void) {
612 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
614 /* Return thread ID */
619 Get current thread state of a thread.
621 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
622 TaskHandle_t hTask = (TaskHandle_t)thread_id;
623 osThreadState_t state;
625 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
626 state = osThreadError;
629 switch (eTaskGetState (hTask)) {
630 case eRunning: state = osThreadRunning; break;
631 case eReady: state = osThreadReady; break;
633 case eSuspended: state = osThreadBlocked; break;
634 case eDeleted: state = osThreadTerminated; break;
636 default: state = osThreadError; break;
640 /* Return current thread state */
645 Get available stack space of a thread based on stack watermark recording during execution.
647 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
648 TaskHandle_t hTask = (TaskHandle_t)thread_id;
651 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
654 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
657 /* Return remaining stack space in bytes */
662 Change priority of a thread.
664 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
665 TaskHandle_t hTask = (TaskHandle_t)thread_id;
668 if (IRQ_Context() != 0U) {
671 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
672 stat = osErrorParameter;
676 vTaskPrioritySet (hTask, (UBaseType_t)priority);
679 /* Return execution status */
684 Get current priority of a thread.
686 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
687 TaskHandle_t hTask = (TaskHandle_t)thread_id;
690 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
691 prio = osPriorityError;
693 prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
696 /* Return current thread priority */
701 Pass control to next thread that is in state READY.
703 osStatus_t osThreadYield (void) {
706 if (IRQ_Context() != 0U) {
713 /* Return execution status */
717 #if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
719 Suspend execution of a thread.
721 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
722 TaskHandle_t hTask = (TaskHandle_t)thread_id;
725 if (IRQ_Context() != 0U) {
728 else if (hTask == NULL) {
729 stat = osErrorParameter;
733 vTaskSuspend (hTask);
736 /* Return execution status */
741 Resume execution of a thread.
743 osStatus_t osThreadResume (osThreadId_t thread_id) {
744 TaskHandle_t hTask = (TaskHandle_t)thread_id;
747 if (IRQ_Context() != 0U) {
750 else if (hTask == NULL) {
751 stat = osErrorParameter;
758 /* Return execution status */
761 #endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */
764 Terminate execution of current running thread.
766 __NO_RETURN void osThreadExit (void) {
767 #ifndef USE_FreeRTOS_HEAP_1
774 Terminate execution of a thread.
776 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
777 TaskHandle_t hTask = (TaskHandle_t)thread_id;
779 #ifndef USE_FreeRTOS_HEAP_1
782 if (IRQ_Context() != 0U) {
785 else if (hTask == NULL) {
786 stat = osErrorParameter;
789 tstate = eTaskGetState (hTask);
791 if (tstate != eDeleted) {
795 stat = osErrorResource;
802 /* Return execution status */
807 Get number of active threads.
809 uint32_t osThreadGetCount (void) {
812 if (IRQ_Context() != 0U) {
815 count = uxTaskGetNumberOfTasks();
818 /* Return number of active threads */
822 #if (configUSE_OS2_THREAD_ENUMERATE == 1)
824 Enumerate active threads.
826 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
830 if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
835 /* Allocate memory on heap to temporarily store TaskStatus_t information */
836 count = uxTaskGetNumberOfTasks();
837 task = pvPortMalloc (count * sizeof(TaskStatus_t));
840 /* Retrieve task status information */
841 count = uxTaskGetSystemState (task, count, NULL);
843 /* Copy handles from task status array into provided thread array */
844 for (i = 0U; (i < count) && (i < array_items); i++) {
845 thread_array[i] = (osThreadId_t)task[i].xHandle;
849 (void)xTaskResumeAll();
854 /* Return number of enumerated threads */
857 #endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */
860 /* ==== Thread Flags Functions ==== */
862 #if (configUSE_OS2_THREAD_FLAGS == 1)
864 Set the specified Thread Flags of a thread.
866 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
867 TaskHandle_t hTask = (TaskHandle_t)thread_id;
871 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
872 rflags = (uint32_t)osErrorParameter;
875 rflags = (uint32_t)osError;
877 if (IRQ_Context() != 0U) {
880 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
881 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
883 portYIELD_FROM_ISR (yield);
886 (void)xTaskNotify (hTask, flags, eSetBits);
887 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
890 /* Return flags after setting */
895 Clear the specified Thread Flags of current running thread.
897 uint32_t osThreadFlagsClear (uint32_t flags) {
899 uint32_t rflags, cflags;
901 if (IRQ_Context() != 0U) {
902 rflags = (uint32_t)osErrorISR;
904 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
905 rflags = (uint32_t)osErrorParameter;
908 hTask = xTaskGetCurrentTaskHandle();
910 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
914 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
915 rflags = (uint32_t)osError;
919 rflags = (uint32_t)osError;
923 /* Return flags before clearing */
928 Get the current Thread Flags of current running thread.
930 uint32_t osThreadFlagsGet (void) {
934 if (IRQ_Context() != 0U) {
935 rflags = (uint32_t)osErrorISR;
938 hTask = xTaskGetCurrentTaskHandle();
940 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
941 rflags = (uint32_t)osError;
945 /* Return current flags */
950 Wait for one or more Thread Flags of the current running thread to become signaled.
952 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
953 uint32_t rflags, nval;
955 TickType_t t0, td, tout;
958 if (IRQ_Context() != 0U) {
959 rflags = (uint32_t)osErrorISR;
961 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
962 rflags = (uint32_t)osErrorParameter;
965 if ((options & osFlagsNoClear) == osFlagsNoClear) {
974 t0 = xTaskGetTickCount();
976 rval = xTaskNotifyWait (0, clear, &nval, tout);
978 if (rval == pdPASS) {
982 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
983 if ((flags & rflags) == flags) {
987 rflags = (uint32_t)osErrorResource;
993 if ((flags & rflags) != 0) {
997 rflags = (uint32_t)osErrorResource;
1003 /* Update timeout */
1004 td = xTaskGetTickCount() - t0;
1014 rflags = (uint32_t)osErrorResource;
1016 rflags = (uint32_t)osErrorTimeout;
1020 while (rval != pdFAIL);
1023 /* Return flags before clearing */
1026 #endif /* (configUSE_OS2_THREAD_FLAGS == 1) */
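/*
  Illustrative thread-flags hand-shake between an interrupt and a thread
  (a sketch; rx_thread_id, FLAG_DATA_READY and DataReady_IRQHandler are
  hypothetical). As defined above, only the lower 31 bits may be used
  (MAX_BITS_TASK_NOTIFY); error codes returned by the flags functions have
  the most significant bit set.

    extern osThreadId_t rx_thread_id;                  // hypothetical: set at thread creation

    #define FLAG_DATA_READY  0x00000001U

    void DataReady_IRQHandler (void) {
      (void)osThreadFlagsSet(rx_thread_id, FLAG_DATA_READY);   // allowed from ISR context
    }

    static void Rx_Thread (void *argument) {
      (void)argument;
      for (;;) {
        uint32_t flags = osThreadFlagsWait(FLAG_DATA_READY, osFlagsWaitAny, osWaitForever);
        if ((flags & 0x80000000U) == 0U) {             // high bit set would indicate an error code
          // process the received data
        }
      }
    }
*/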
1029 /* ==== Generic Wait Functions ==== */
1032 Wait for Timeout (Time Delay).
1034 osStatus_t osDelay (uint32_t ticks) {
1037 if (IRQ_Context() != 0U) {
1048 /* Return execution status */
1053 Wait until specified time.
1055 osStatus_t osDelayUntil (uint32_t ticks) {
1056 TickType_t tcnt, delay;
1059 if (IRQ_Context() != 0U) {
1064 tcnt = xTaskGetTickCount();
1066 /* Determine remaining number of ticks to delay */
1067 delay = (TickType_t)ticks - tcnt;
1069 /* Check if target tick has not expired */
1070 if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
1071 vTaskDelayUntil (&tcnt, delay);
1075 /* No delay or already expired */
1076 stat = osErrorParameter;
1080 /* Return execution status */
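/*
  Illustrative fixed-rate loop using osDelayUntil (a sketch; Sampler_Thread,
  Sensor_Read and the 100 ms period are hypothetical). Unlike osDelay, the
  absolute wake-up time does not drift with the execution time of the loop body.

    static void Sampler_Thread (void *argument) {
      (void)argument;
      uint32_t tick = osKernelGetTickCount();
      const uint32_t period = osKernelGetTickFreq() / 10U;   // 100 ms expressed in ticks
      for (;;) {
        Sensor_Read();                  // hypothetical application function
        tick += period;
        osDelayUntil(tick);             // sleep until the absolute tick value
      }
    }
*/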
1085 /* ==== Timer Management Functions ==== */
1087 #if (configUSE_OS2_TIMER == 1)
1089 static void TimerCallback (TimerHandle_t hTimer) {
1090 TimerCallback_t *callb;
1092 /* Retrieve pointer to callback function and argument */
1093 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1095 /* Remove dynamic allocation flag */
1096 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1098 if (callb != NULL) {
1099 callb->func (callb->arg);
1104 Create and Initialize a timer.
1106 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
1108 TimerHandle_t hTimer;
1109 TimerCallback_t *callb;
1116 if ((IRQ_Context() == 0U) && (func != NULL)) {
1120 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1121 /* Static memory allocation is available: check if memory for control block */
1122 /* is provided and if it also contains space for callback and its argument */
1123 if ((attr != NULL) && (attr->cb_mem != NULL)) {
1124 if (attr->cb_size >= (sizeof(StaticTimer_t) + sizeof(TimerCallback_t))) {
1125 callb = (TimerCallback_t *)((uint32_t)attr->cb_mem + sizeof(StaticTimer_t));
1130 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1131 /* Dynamic memory allocation is available: if memory for callback and */
1132 /* its argument is not provided, allocate it from dynamic memory pool */
1133 if (callb == NULL) {
1134 callb = (TimerCallback_t *)pvPortMalloc (sizeof(TimerCallback_t));
1136 if (callb != NULL) {
1137 /* Callback memory was allocated from dynamic pool, set flag */
1143 if (callb != NULL) {
1145 callb->arg = argument;
1147 if (type == osTimerOnce) {
1157 if (attr->name != NULL) {
1161 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
1162 /* The memory for control block is provided, use static object */
1166 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1167 /* Control block will be allocated from the dynamic pool */
1175 /* Store callback memory dynamic allocation flag */
1176 callb = (TimerCallback_t *)((uint32_t)callb | callb_dyn);
1178 TimerCallback is always installed as the FreeRTOS timer callback; it calls the
1179 application-specified function with its argument, both of which are stored in the callb structure.
1182 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1183 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
1188 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1189 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
1194 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1195 if ((hTimer == NULL) && (callb != NULL) && (callb_dyn == 1U)) {
1196 /* Failed to create a timer, release allocated resources */
1197 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1205 /* Return timer ID */
1206 return ((osTimerId_t)hTimer);
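/*
  Illustrative periodic software timer (a sketch; Blink_Callback and the 500
  tick period are hypothetical). The callback executes in the context of the
  FreeRTOS timer service task, not in an interrupt handler.

    static void Blink_Callback (void *argument) {
      (void)argument;
      // toggle an LED, kick a watchdog, ...
    }

    osTimerId_t tim = osTimerNew(Blink_Callback, osTimerPeriodic, NULL, NULL);
    if (tim != NULL) {
      osTimerStart(tim, 500U);          // period of 500 kernel ticks
    }
*/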
1210 Get name of a timer.
1212 const char *osTimerGetName (osTimerId_t timer_id) {
1213 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1216 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1219 p = pcTimerGetName (hTimer);
1222 /* Return name as null-terminated string */
1227 Start or restart a timer.
1229 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
1230 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1233 if (IRQ_Context() != 0U) {
1236 else if (hTimer == NULL) {
1237 stat = osErrorParameter;
1240 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
1243 stat = osErrorResource;
1247 /* Return execution status */
1254 osStatus_t osTimerStop (osTimerId_t timer_id) {
1255 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1258 if (IRQ_Context() != 0U) {
1261 else if (hTimer == NULL) {
1262 stat = osErrorParameter;
1265 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1266 stat = osErrorResource;
1269 if (xTimerStop (hTimer, 0) == pdPASS) {
1277 /* Return execution status */
1282 Check if a timer is running.
1284 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1285 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1288 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1291 running = (uint32_t)xTimerIsTimerActive (hTimer);
1294 /* Return 0: not running, 1: running */
1301 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1302 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1304 #ifndef USE_FreeRTOS_HEAP_1
1305 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1306 TimerCallback_t *callb;
1309 if (IRQ_Context() != 0U) {
1312 else if (hTimer == NULL) {
1313 stat = osErrorParameter;
1316 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1317 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1320 if (xTimerDelete (hTimer, 0) == pdPASS) {
1321 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1322 if ((uint32_t)callb & 1U) {
1323 /* Callback memory was allocated from dynamic pool, clear flag */
1324 callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
1326 /* Return allocated memory to dynamic pool */
1332 stat = osErrorResource;
1339 /* Return execution status */
1342 #endif /* (configUSE_OS2_TIMER == 1) */
1345 /* ==== Event Flags Management Functions ==== */
1348 Create and Initialize an Event Flags object.
1351 - Event flags are limited to 24 bits.
1353 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1354 EventGroupHandle_t hEventGroup;
1359 if (IRQ_Context() == 0U) {
1363 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1364 /* The memory for control block is provided, use static object */
1368 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1369 /* Control block will be allocated from the dynamic pool */
1379 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1380 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1385 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1386 hEventGroup = xEventGroupCreate();
1392 /* Return event flags ID */
1393 return ((osEventFlagsId_t)hEventGroup);
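/*
  Illustrative event-flags usage (a sketch; EVT_RX and EVT_TX are hypothetical).
  Only the lower 24 bits may be used, as noted above.

    #define EVT_RX  0x0001U
    #define EVT_TX  0x0002U

    osEventFlagsId_t evt = osEventFlagsNew(NULL);

    // signaling side
    osEventFlagsSet(evt, EVT_RX);

    // waiting side: wait for both flags, clear them on exit
    uint32_t flags = osEventFlagsWait(evt, EVT_RX | EVT_TX, osFlagsWaitAll, osWaitForever);
*/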
1397 Set the specified Event Flags.
1400 - Event flags are limited to 24 bits.
1402 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1403 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1407 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1408 rflags = (uint32_t)osErrorParameter;
1410 else if (IRQ_Context() != 0U) {
1411 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1413 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1414 rflags = (uint32_t)osErrorResource;
1418 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1419 rflags = (uint32_t)osErrorResource;
1422 portYIELD_FROM_ISR (yield);
1427 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1430 /* Return event flags after setting */
1435 Clear the specified Event Flags.
1438 - Event flags are limited to 24 bits.
1440 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1441 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1444 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1445 rflags = (uint32_t)osErrorParameter;
1447 else if (IRQ_Context() != 0U) {
1448 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1449 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsClear from ISR */
1450 rflags = (uint32_t)osErrorResource;
1452 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1454 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1455 rflags = (uint32_t)osErrorResource;
1458 /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
1459 /* Yield is required here otherwise clear operation might not execute in the right order. */
1460 /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
1461 portYIELD_FROM_ISR (pdTRUE);
1466 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1469 /* Return event flags before clearing */
1474 Get the current Event Flags.
1477 - Event flags are limited to 24 bits.
1479 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1480 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1483 if (ef_id == NULL) {
1486 else if (IRQ_Context() != 0U) {
1487 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1490 rflags = xEventGroupGetBits (hEventGroup);
1493 /* Return current event flags */
1498 Wait for one or more Event Flags to become signaled.
1501 - Event flags are limited to 24 bits.
1502 - osEventFlagsWait cannot be called from an ISR.
1504 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1505 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1506 BaseType_t wait_all;
1507 BaseType_t exit_clr;
1510 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1511 rflags = (uint32_t)osErrorParameter;
1513 else if (IRQ_Context() != 0U) {
1514 rflags = (uint32_t)osErrorISR;
1517 if (options & osFlagsWaitAll) {
1523 if (options & osFlagsNoClear) {
1529 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1531 if (options & osFlagsWaitAll) {
1532 if ((flags & rflags) != flags) {
1534 rflags = (uint32_t)osErrorTimeout;
1536 rflags = (uint32_t)osErrorResource;
1541 if ((flags & rflags) == 0U) {
1543 rflags = (uint32_t)osErrorTimeout;
1545 rflags = (uint32_t)osErrorResource;
1551 /* Return event flags before clearing */
1556 Delete an Event Flags object.
1558 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1559 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1562 #ifndef USE_FreeRTOS_HEAP_1
1563 if (IRQ_Context() != 0U) {
1566 else if (hEventGroup == NULL) {
1567 stat = osErrorParameter;
1571 vEventGroupDelete (hEventGroup);
1577 /* Return execution status */
1582 /* ==== Mutex Management Functions ==== */
1584 #if (configUSE_OS2_MUTEX == 1)
1586 Create and Initialize a Mutex object.
1589 - The priority inheritance protocol is always used; the osMutexPrioInherit attribute is ignored.
1590 - Robust mutexes are not supported; NULL is returned if osMutexRobust is used.
1592 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1593 SemaphoreHandle_t hMutex;
1600 if (IRQ_Context() == 0U) {
1602 type = attr->attr_bits;
1607 if ((type & osMutexRecursive) == osMutexRecursive) {
1613 if ((type & osMutexRobust) != osMutexRobust) {
1617 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1618 /* The memory for control block is provided, use static object */
1622 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1623 /* Control block will be allocated from the dynamic pool */
1633 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1635 #if (configUSE_RECURSIVE_MUTEXES == 1)
1636 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1640 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1646 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1648 #if (configUSE_RECURSIVE_MUTEXES == 1)
1649 hMutex = xSemaphoreCreateRecursiveMutex ();
1652 hMutex = xSemaphoreCreateMutex ();
1658 #if (configQUEUE_REGISTRY_SIZE > 0)
1659 if (hMutex != NULL) {
1660 if ((attr != NULL) && (attr->name != NULL)) {
1661 /* Only non-NULL name objects are added to the Queue Registry */
1662 vQueueAddToRegistry (hMutex, attr->name);
1667 if ((hMutex != NULL) && (rmtx != 0U)) {
1668 /* Set LSB as 'recursive mutex flag' */
1669 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1674 /* Return mutex ID */
1675 return ((osMutexId_t)hMutex);
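/*
  Illustrative recursive mutex (a sketch; the name "FS" and fs_mutex_attr are
  hypothetical). osMutexPrioInherit is accepted but redundant here, since
  priority inheritance is always used as noted above; osMutexRobust would make
  osMutexNew return NULL.

    static const osMutexAttr_t fs_mutex_attr = {
      .name      = "FS",
      .attr_bits = osMutexRecursive | osMutexPrioInherit
    };

    osMutexId_t fs_mutex = osMutexNew(&fs_mutex_attr);

    if (osMutexAcquire(fs_mutex, osWaitForever) == osOK) {
      // ... critical section, may acquire fs_mutex again recursively ...
      osMutexRelease(fs_mutex);
    }
*/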
1679 Acquire a Mutex or timeout if it is locked.
1681 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1682 SemaphoreHandle_t hMutex;
1686 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1688 /* Extract recursive mutex flag */
1689 rmtx = (uint32_t)mutex_id & 1U;
1693 if (IRQ_Context() != 0U) {
1696 else if (hMutex == NULL) {
1697 stat = osErrorParameter;
1701 #if (configUSE_RECURSIVE_MUTEXES == 1)
1702 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1703 if (timeout != 0U) {
1704 stat = osErrorTimeout;
1706 stat = osErrorResource;
1712 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1713 if (timeout != 0U) {
1714 stat = osErrorTimeout;
1716 stat = osErrorResource;
1722 /* Return execution status */
1727 Release a Mutex that was acquired by osMutexAcquire.
1729 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1730 SemaphoreHandle_t hMutex;
1734 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1736 /* Extract recursive mutex flag */
1737 rmtx = (uint32_t)mutex_id & 1U;
1741 if (IRQ_Context() != 0U) {
1744 else if (hMutex == NULL) {
1745 stat = osErrorParameter;
1749 #if (configUSE_RECURSIVE_MUTEXES == 1)
1750 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1751 stat = osErrorResource;
1756 if (xSemaphoreGive (hMutex) != pdPASS) {
1757 stat = osErrorResource;
1762 /* Return execution status */
1767 Get Thread which owns a Mutex object.
1769 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1770 SemaphoreHandle_t hMutex;
1773 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1775 if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
1778 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1781 /* Return owner thread ID */
1786 Delete a Mutex object.
1788 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1790 #ifndef USE_FreeRTOS_HEAP_1
1791 SemaphoreHandle_t hMutex;
1793 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1795 if (IRQ_Context() != 0U) {
1798 else if (hMutex == NULL) {
1799 stat = osErrorParameter;
1802 #if (configQUEUE_REGISTRY_SIZE > 0)
1803 vQueueUnregisterQueue (hMutex);
1806 vSemaphoreDelete (hMutex);
1812 /* Return execution status */
1815 #endif /* (configUSE_OS2_MUTEX == 1) */
1818 /* ==== Semaphore Management Functions ==== */
1821 Create and Initialize a Semaphore object.
1823 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1824 SemaphoreHandle_t hSemaphore;
1829 if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
1833 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1834 /* The memory for control block is provided, use static object */
1838 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1839 /* Control block will be allocated from the dynamic pool */
1849 if (max_count == 1U) {
1851 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1852 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1856 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1857 hSemaphore = xSemaphoreCreateBinary();
1861 if ((hSemaphore != NULL) && (initial_count != 0U)) {
1862 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1863 vSemaphoreDelete (hSemaphore);
1870 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1871 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
1875 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1876 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
1881 #if (configQUEUE_REGISTRY_SIZE > 0)
1882 if (hSemaphore != NULL) {
1883 if ((attr != NULL) && (attr->name != NULL)) {
1884 /* Only non-NULL name objects are added to the Queue Registry */
1885 vQueueAddToRegistry (hSemaphore, attr->name);
1892 /* Return semaphore ID */
1893 return ((osSemaphoreId_t)hSemaphore);
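/*
  Illustrative counting semaphore guarding a pool of four identical resources
  (a sketch; the count of 4 is hypothetical). A max_count of 1 would create a
  binary semaphore instead, as handled above.

    osSemaphoreId_t sem = osSemaphoreNew(4U, 4U, NULL);   // 4 tokens initially available

    if (osSemaphoreAcquire(sem, 100U) == osOK) {          // wait up to 100 ticks for a token
      // ... use one resource ...
      osSemaphoreRelease(sem);
    }
*/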
1897 Acquire a Semaphore token or timeout if no tokens are available.
1899 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
1900 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1906 if (hSemaphore == NULL) {
1907 stat = osErrorParameter;
1909 else if (IRQ_Context() != 0U) {
1910 if (timeout != 0U) {
1911 stat = osErrorParameter;
1916 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
1917 stat = osErrorResource;
1919 portYIELD_FROM_ISR (yield);
1924 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
1925 if (timeout != 0U) {
1926 stat = osErrorTimeout;
1928 stat = osErrorResource;
1933 /* Return execution status */
1938 Release a Semaphore token up to the initial maximum count.
1940 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
1941 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1947 if (hSemaphore == NULL) {
1948 stat = osErrorParameter;
1950 else if (IRQ_Context() != 0U) {
1953 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
1954 stat = osErrorResource;
1956 portYIELD_FROM_ISR (yield);
1960 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1961 stat = osErrorResource;
1965 /* Return execution status */
1970 Get current Semaphore token count.
1972 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
1973 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1976 if (hSemaphore == NULL) {
1979 else if (IRQ_Context() != 0U) {
1980 count = uxQueueMessagesWaitingFromISR (hSemaphore);
1982 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
1985 /* Return number of tokens */
1990 Delete a Semaphore object.
1992 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
1993 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1996 #ifndef USE_FreeRTOS_HEAP_1
1997 if (IRQ_Context() != 0U) {
2000 else if (hSemaphore == NULL) {
2001 stat = osErrorParameter;
2004 #if (configQUEUE_REGISTRY_SIZE > 0)
2005 vQueueUnregisterQueue (hSemaphore);
2009 vSemaphoreDelete (hSemaphore);
2015 /* Return execution status */
2020 /* ==== Message Queue Management Functions ==== */
2023 Create and Initialize a Message Queue object.
2026 - The memory for the control block and message data must be provided in the
2027 osMessageQueueAttr_t structure in order to allocate the object statically.
2029 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
2030 QueueHandle_t hQueue;
2035 if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
2039 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
2040 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
2041 /* The memory for control block and message data is provided, use static object */
2045 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
2046 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
2047 /* Control block will be allocated from the dynamic pool */
2057 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2058 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
2063 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2064 hQueue = xQueueCreate (msg_count, msg_size);
2069 #if (configQUEUE_REGISTRY_SIZE > 0)
2070 if (hQueue != NULL) {
2071 if ((attr != NULL) && (attr->name != NULL)) {
2072 /* Only non-NULL name objects are added to the Queue Registry */
2073 vQueueAddToRegistry (hQueue, attr->name);
2080 /* Return message queue ID */
2081 return ((osMessageQueueId_t)hQueue);
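/*
  Illustrative statically allocated message queue (a sketch; MSG_COUNT, Msg_t,
  mq_cb and mq_data are hypothetical). Note that mq_size must be at least
  msg_count * msg_size, as checked above, and that the priority argument of
  osMessageQueuePut is ignored by this wrapper.

    #define MSG_COUNT  8U
    typedef struct { uint32_t id; uint32_t value; } Msg_t;

    static StaticQueue_t mq_cb;
    static uint8_t       mq_data[MSG_COUNT * sizeof(Msg_t)];

    static const osMessageQueueAttr_t mq_attr = {
      .cb_mem  = &mq_cb,
      .cb_size = sizeof(mq_cb),
      .mq_mem  = mq_data,
      .mq_size = sizeof(mq_data)
    };

    osMessageQueueId_t mq = osMessageQueueNew(MSG_COUNT, sizeof(Msg_t), &mq_attr);

    Msg_t msg = { 1U, 42U };
    osMessageQueuePut(mq, &msg, 0U, 0U);            // non-blocking put
    osMessageQueueGet(mq, &msg, NULL, osWaitForever);
*/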
2085 Put a Message into a Queue or timeout if Queue is full.
2088 - Message priority is ignored
2090 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
2091 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2095 (void)msg_prio; /* Message priority is ignored */
2099 if (IRQ_Context() != 0U) {
2100 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2101 stat = osErrorParameter;
2106 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
2107 stat = osErrorResource;
2109 portYIELD_FROM_ISR (yield);
2114 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2115 stat = osErrorParameter;
2118 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2119 if (timeout != 0U) {
2120 stat = osErrorTimeout;
2122 stat = osErrorResource;
2128 /* Return execution status */
2133 Get a Message from a Queue or timeout if Queue is empty.
2136 - Message priority is ignored
2138 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
2139 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2143 (void)msg_prio; /* Message priority is ignored */
2147 if (IRQ_Context() != 0U) {
2148 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
2149 stat = osErrorParameter;
2154 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
2155 stat = osErrorResource;
2157 portYIELD_FROM_ISR (yield);
2162 if ((hQueue == NULL) || (msg_ptr == NULL)) {
2163 stat = osErrorParameter;
2166 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
2167 if (timeout != 0U) {
2168 stat = osErrorTimeout;
2170 stat = osErrorResource;
2176 /* Return execution status */
2181 Get maximum number of messages in a Message Queue.
2183 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
2184 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2190 /* capacity = pxQueue->uxLength */
2191 capacity = mq->uxDummy4[1];
2194 /* Return maximum number of messages */
2199 Get maximum message size in a Message Queue.
2201 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
2202 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2208 /* size = pxQueue->uxItemSize */
2209 size = mq->uxDummy4[2];
2212 /* Return maximum message size */
2217 Get number of queued messages in a Message Queue.
2219 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
2220 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2223 if (hQueue == NULL) {
2226 else if (IRQ_Context() != 0U) {
2227 count = uxQueueMessagesWaitingFromISR (hQueue);
2230 count = uxQueueMessagesWaiting (hQueue);
2233 /* Return number of queued messages */
2234 return ((uint32_t)count);
2238 Get number of available slots for messages in a Message Queue.
2240 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
2241 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
2248 else if (IRQ_Context() != 0U) {
2249 isrm = taskENTER_CRITICAL_FROM_ISR();
2251 /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
2252 space = mq->uxDummy4[1] - mq->uxDummy4[0];
2254 taskEXIT_CRITICAL_FROM_ISR(isrm);
2257 space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
2260 /* Return number of available slots */
2265 Reset a Message Queue to initial empty state.
2267 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
2268 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2271 if (IRQ_Context() != 0U) {
2274 else if (hQueue == NULL) {
2275 stat = osErrorParameter;
2279 (void)xQueueReset (hQueue);
2282 /* Return execution status */
2287 Delete a Message Queue object.
2289 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
2290 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
2293 #ifndef USE_FreeRTOS_HEAP_1
2294 if (IRQ_Context() != 0U) {
2297 else if (hQueue == NULL) {
2298 stat = osErrorParameter;
2301 #if (configQUEUE_REGISTRY_SIZE > 0)
2302 vQueueUnregisterQueue (hQueue);
2306 vQueueDelete (hQueue);
2312 /* Return execution status */
2317 /* ==== Memory Pool Management Functions ==== */
2319 #ifdef FREERTOS_MPOOL_H_
2320 /* Static memory pool functions */
2321 static void FreeBlock (MemPool_t *mp, void *block);
2322 static void *AllocBlock (MemPool_t *mp);
2323 static void *CreateBlock (MemPool_t *mp);
2326 Create and Initialize a Memory Pool object.
2328 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
2331 int32_t mem_cb, mem_mp;
2334 if (IRQ_Context() != 0U) {
2337 else if ((block_count == 0U) || (block_size == 0U)) {
2342 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
2349 if (attr->name != NULL) {
2353 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
2354 /* Static control block is provided */
2357 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
2358 /* Allocate control block memory on heap */
2362 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
2363 /* Allocate memory array on heap */
2367 if (attr->mp_mem != NULL) {
2368 /* Check if array is 4-byte aligned */
2369 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
2370 /* Check if array big enough */
2371 if (attr->mp_size >= sz) {
2372 /* Static memory pool array is provided */
2380 /* Attributes not provided, allocate memory on heap */
2386 mp = pvPortMalloc (sizeof(MemPool_t));
2392 /* Create a semaphore (max count == initial count == block_count) */
2393 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2394 mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
2395 #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2396 mp->sem = xSemaphoreCreateCounting (block_count, block_count);
2401 if (mp->sem != NULL) {
2402 /* Setup memory array */
2404 mp->mem_arr = pvPortMalloc (sz);
2406 mp->mem_arr = attr->mp_mem;
2411 if ((mp != NULL) && (mp->mem_arr != NULL)) {
2412 /* Memory pool can be created */
2416 mp->bl_sz = block_size;
2417 mp->bl_cnt = block_count;
2420 /* Set heap allocated memory flags */
2421 mp->status = MPOOL_STATUS;
2424 /* Control block on heap */
2428 /* Memory array on heap */
2433 /* Memory pool cannot be created, release allocated resources */
2434 if ((mem_cb == 0) && (mp != NULL)) {
2435 /* Free control block memory */
2442 /* Return memory pool ID */
2447 Get name of a Memory Pool object.
2449 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
2450 MemPool_t *mp = (MemPool_t *)mp_id;
2453 if (IRQ_Context() != 0U) {
2456 else if (mp_id == NULL) {
2463 /* Return name as null-terminated string */
2468 Allocate a memory block from a Memory Pool.
2470 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2475 if (mp_id == NULL) {
2476 /* Invalid input parameters */
2482 mp = (MemPool_t *)mp_id;
2484 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2485 if (IRQ_Context() != 0U) {
2486 if (timeout == 0U) {
2487 if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
2488 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2489 isrm = taskENTER_CRITICAL_FROM_ISR();
2491 /* Get a block from the free-list */
2492 block = AllocBlock(mp);
2494 if (block == NULL) {
2495 /* List of free blocks is empty, 'create' new block */
2496 block = CreateBlock(mp);
2499 taskEXIT_CRITICAL_FROM_ISR(isrm);
2505 if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
2506 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2507 taskENTER_CRITICAL();
2509 /* Get a block from the free-list */
2510 block = AllocBlock(mp);
2512 if (block == NULL) {
2513 /* List of free blocks is empty, 'create' new block */
2514 block = CreateBlock(mp);
2517 taskEXIT_CRITICAL();
2524 /* Return memory block address */
2529 Return an allocated memory block back to a Memory Pool.
2531 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2537 if ((mp_id == NULL) || (block == NULL)) {
2538 /* Invalid input parameters */
2539 stat = osErrorParameter;
2542 mp = (MemPool_t *)mp_id;
2544 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2545 /* Invalid object status */
2546 stat = osErrorResource;
2548 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2549 /* Block pointer outside of memory array area */
2550 stat = osErrorParameter;
2555 if (IRQ_Context() != 0U) {
2556 if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
2557 stat = osErrorResource;
2560 isrm = taskENTER_CRITICAL_FROM_ISR();
2562 /* Add block to the list of free blocks */
2563 FreeBlock(mp, block);
2565 taskEXIT_CRITICAL_FROM_ISR(isrm);
2568 xSemaphoreGiveFromISR (mp->sem, &yield);
2569 portYIELD_FROM_ISR (yield);
2573 if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
2574 stat = osErrorResource;
2577 taskENTER_CRITICAL();
2579 /* Add block to the list of free blocks */
2580 FreeBlock(mp, block);
2582 taskEXIT_CRITICAL();
2584 xSemaphoreGive (mp->sem);
2590 /* Return execution status */
2595 Get maximum number of memory blocks in a Memory Pool.
2597 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2601 if (mp_id == NULL) {
2602 /* Invalid input parameters */
2606 mp = (MemPool_t *)mp_id;
2608 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2609 /* Invalid object status */
2617 /* Return maximum number of memory blocks */
2622 Get memory block size in a Memory Pool.
2624 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2628 if (mp_id == NULL) {
2629 /* Invalid input parameters */
2633 mp = (MemPool_t *)mp_id;
2635 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2636 /* Invalid object status */
2644 /* Return memory block size in bytes */
2649 Get number of memory blocks used in a Memory Pool.
2651 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2655 if (mp_id == NULL) {
2656 /* Invalid input parameters */
2660 mp = (MemPool_t *)mp_id;
2662 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2663 /* Invalid object status */
2667 if (IRQ_Context() != 0U) {
2668 n = uxSemaphoreGetCountFromISR (mp->sem);
2670 n = uxSemaphoreGetCount (mp->sem);
2677 /* Return number of memory blocks used */
2682 Get number of memory blocks available in a Memory Pool.
2684 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2688 if (mp_id == NULL) {
2689 /* Invalid input parameters */
2693 mp = (MemPool_t *)mp_id;
2695 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2696 /* Invalid object status */
2700 if (IRQ_Context() != 0U) {
2701 n = uxSemaphoreGetCountFromISR (mp->sem);
2703 n = uxSemaphoreGetCount (mp->sem);
2708 /* Return number of memory blocks available */
2713 Delete a Memory Pool object.
2715 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2719 if (mp_id == NULL) {
2720 /* Invalid input parameters */
2721 stat = osErrorParameter;
2723 else if (IRQ_Context() != 0U) {
2727 mp = (MemPool_t *)mp_id;
2729 taskENTER_CRITICAL();
2731 /* Invalidate control block status */
2732 mp->status = mp->status & 3U;
2734 /* Wake-up tasks waiting for pool semaphore */
2735 while (xSemaphoreGive (mp->sem) == pdTRUE);
2741 if ((mp->status & 2U) != 0U) {
2742 /* Memory pool array allocated on heap */
2743 vPortFree (mp->mem_arr);
2745 if ((mp->status & 1U) != 0U) {
2746 /* Memory pool control block allocated on heap */
2750 taskEXIT_CRITICAL();
2755 /* Return execution status */
2760 Create a new block according to the current block index.
2762 static void *CreateBlock (MemPool_t *mp) {
2763 MemPoolBlock_t *p = NULL;
2765 if (mp->n < mp->bl_cnt) {
2766 /* Unallocated blocks exist, set pointer to new block */
2767 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2769 /* Increment block index */
2777 Allocate a block by reading the list of free blocks.
2779 static void *AllocBlock (MemPool_t *mp) {
2780 MemPoolBlock_t *p = NULL;
2782 if (mp->head != NULL) {
2783 /* List of free blocks exists, get the head block */
2786 /* Head block is now next on the list */
2794 Free a block by putting it back on the list of free blocks.
2796 static void FreeBlock (MemPool_t *mp, void *block) {
2797 MemPoolBlock_t *p = block;
2799 /* Store current head into block memory space */
2802 /* Store current block as new head */
2805 #endif /* FREERTOS_MPOOL_H_ */
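/*
  Illustrative memory pool usage (a sketch; the block type Frame_t and the pool
  size of 16 blocks are hypothetical). Blocks have a fixed size;
  osMemoryPoolAlloc blocks for up to 'timeout' ticks when the pool is exhausted.

    typedef struct { uint8_t data[64]; } Frame_t;

    osMemoryPoolId_t pool = osMemoryPoolNew(16U, sizeof(Frame_t), NULL);

    Frame_t *frame = (Frame_t *)osMemoryPoolAlloc(pool, osWaitForever);
    if (frame != NULL) {
      // ... fill and process the frame ...
      osMemoryPoolFree(pool, frame);
    }
*/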
2806 /*---------------------------------------------------------------------------*/
2808 /* Callback function prototypes */
2809 extern void vApplicationIdleHook (void);
2810 extern void vApplicationMallocFailedHook (void);
2811 extern void vApplicationDaemonTaskStartupHook (void);
2814 Dummy implementation of the callback function vApplicationIdleHook().
2816 #if (configUSE_IDLE_HOOK == 1)
2817 __WEAK void vApplicationIdleHook (void){}
2821 Dummy implementation of the callback function vApplicationTickHook().
2823 #if (configUSE_TICK_HOOK == 1)
2824 __WEAK void vApplicationTickHook (void){}
2828 Dummy implementation of the callback function vApplicationMallocFailedHook().
2830 #if (configUSE_MALLOC_FAILED_HOOK == 1)
2831 __WEAK void vApplicationMallocFailedHook (void) {
2832 /* Assert when malloc failed hook is enabled but no application defined function exists */
2838 Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
2840 #if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
2841 __WEAK void vApplicationDaemonTaskStartupHook (void){}
2845 Dummy implementation of the callback function vApplicationStackOverflowHook().
2847 #if (configCHECK_FOR_STACK_OVERFLOW > 0)
2848 __WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, char *pcTaskName) {
2852 /* Assert when stack overflow is enabled but no application defined function exists */
2857 /*---------------------------------------------------------------------------*/
2858 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2860 vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2861 is set to 1 and is required for static memory allocation support.
2863 __WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
2864 /* Idle task control block and stack */
2865 static StaticTask_t Idle_TCB;
2866 static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
2868 *ppxIdleTaskTCBBuffer = &Idle_TCB;
2869 *ppxIdleTaskStackBuffer = &Idle_Stack[0];
2870 *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
2874 vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2875 is set to 1 and is required for static memory allocation support.
2877 __WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
2878 /* Timer task control block and stack */
2879 static StaticTask_t Timer_TCB;
2880 static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
2882 *ppxTimerTaskTCBBuffer = &Timer_TCB;
2883 *ppxTimerTaskStackBuffer = &Timer_Stack[0];
2884 *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;