1 /* --------------------------------------------------------------------------
2 * Copyright (c) 2013-2020 Arm Limited. All rights reserved.
4 * SPDX-License-Identifier: Apache-2.0
6 * Licensed under the Apache License, Version 2.0 (the License); you may
7 * not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
21 *---------------------------------------------------------------------------*/
25 #include "cmsis_os2.h" // ::CMSIS:RTOS2
26 #include "cmsis_compiler.h" // Compiler agnostic definitions
27 #include "os_tick.h" // OS Tick API
29 #include "FreeRTOS.h" // ARM.FreeRTOS::RTOS:Core
30 #include "task.h" // ARM.FreeRTOS::RTOS:Core
31 #include "event_groups.h" // ARM.FreeRTOS::RTOS:Event Groups
32 #include "semphr.h" // ARM.FreeRTOS::RTOS:Core
34 #include "freertos_mpool.h" // osMemoryPool definitions
35 #include "freertos_os2.h" // Configuration check and setup
37 /*---------------------------------------------------------------------------*/
/* Default the architecture feature macros to 0 when the compiler does not
   pre-define them, so the #if chains below always see a defined value. */
#ifndef __ARM_ARCH_6M__
  #define __ARM_ARCH_6M__         0
#endif
#ifndef __ARM_ARCH_7M__
  #define __ARM_ARCH_7M__         0
#endif
#ifndef __ARM_ARCH_7EM__
  #define __ARM_ARCH_7EM__        0
#endif
#ifndef __ARM_ARCH_8M_MAIN__
  #define __ARM_ARCH_8M_MAIN__    0
#endif
#ifndef __ARM_ARCH_7A__
  #define __ARM_ARCH_7A__         0
#endif

/* IS_IRQ_MASKED(): nonzero when interrupts are disabled/masked on the
   current core. v7-M/v8-M mainline also have BASEPRI; v6-M only PRIMASK;
   v7-A uses the CPSR I bit. */
#if   ((__ARM_ARCH_7M__      == 1U) || \
       (__ARM_ARCH_7EM__     == 1U) || \
       (__ARM_ARCH_8M_MAIN__ == 1U))
#define IS_IRQ_MASKED()           ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
#elif  (__ARM_ARCH_6M__      == 1U)
#define IS_IRQ_MASKED()           (__get_PRIMASK() != 0U)
#elif  (__ARM_ARCH_7A__      == 1U)
/* CPSR interrupt-disable (I) bit mask */
#define CPSR_MASKBIT_I            0x80U

#define IS_IRQ_MASKED()           ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
#else
#define IS_IRQ_MASKED()           (__get_PRIMASK() != 0U)
#endif

/* IS_IRQ_MODE(): nonzero when executing in handler (interrupt) mode.
   Cortex-M: IPSR != 0. Cortex-A: any mode other than User/System. */
#if    (__ARM_ARCH_7A__ == 1U)
/* CPSR mode bitmasks */
#define CPSR_MODE_USER            0x10U
#define CPSR_MODE_SYSTEM          0x1FU

#define IS_IRQ_MODE()             ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
#else
#define IS_IRQ_MODE()             (__get_IPSR() != 0U)
#endif
/* Limits: FreeRTOS task notifications provide 31 usable flag bits,
   event groups provide 24 (top byte of the EventBits_t is reserved). */
#define MAX_BITS_TASK_NOTIFY      31U
#define MAX_BITS_EVENT_GROUPS     24U

/* Masks of the bits that are NOT valid as thread/event flags */
#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY)  - 1U))
#define EVENT_FLAGS_INVALID_BITS  (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))

/* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
#define KERNEL_VERSION            (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
                                   ((uint32_t)tskKERNEL_VERSION_MINOR *    10000UL) | \
                                   ((uint32_t)tskKERNEL_VERSION_BUILD *        1UL))

/* Kernel identification string */
#define KERNEL_ID                 ("FreeRTOS " tskKERNEL_VERSION_NUMBER)
93 /* Timer callback information structure definition */
99 /* Kernel initialization state */
100 static osKernelState_t KernelState = osKernelInactive;
/*
  Heap region definition used by heap_5 variant

  Define configAPPLICATION_ALLOCATED_HEAP as nonzero value in FreeRTOSConfig.h if
  heap regions are already defined and vPortDefineHeapRegions is called in application.

  Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using
  definition configHEAP_5_REGIONS as parameter. Overriding configHEAP_5_REGIONS
  is possible by defining it globally or in FreeRTOSConfig.h.
*/
#if defined(USE_FreeRTOS_HEAP_5)
#if (configAPPLICATION_ALLOCATED_HEAP == 0)
  /*
    FreeRTOS heap is not defined by the application.
    Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
    is provided by default. Define configHEAP_5_REGIONS to provide custom
    HeapRegion_t array.
  */
  #define HEAP_5_REGION_SETUP   1

  #ifndef configHEAP_5_REGIONS
    #define configHEAP_5_REGIONS xHeapRegions

    static uint8_t ucHeap[configTOTAL_HEAP_SIZE];

    static HeapRegion_t xHeapRegions[] = {
      { ucHeap, configTOTAL_HEAP_SIZE },
      { NULL,   0                     }   /* Terminator entry required by heap_5 */
    };
  #else
    /* Global definition is provided to override default heap array */
    extern HeapRegion_t configHEAP_5_REGIONS[];
  #endif
#else
  /*
    The application already defined the array used for the FreeRTOS heap and
    called vPortDefineHeapRegions to initialize heap.
  */
  #define HEAP_5_REGION_SETUP   0
#endif /* configAPPLICATION_ALLOCATED_HEAP */
#endif /* USE_FreeRTOS_HEAP_5 */
145 #undef SysTick_Handler
147 /* CMSIS SysTick interrupt handler prototype */
148 extern void SysTick_Handler (void);
149 /* FreeRTOS tick timer interrupt handler prototype */
150 extern void xPortSysTickHandler (void);
153 SysTick handler implementation that also clears overflow flag.
155 void SysTick_Handler (void) {
156 /* Clear overflow flag */
159 if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
160 /* Call tick handler */
161 xPortSysTickHandler();
167 Setup SVC to reset value.
169 __STATIC_INLINE void SVC_Setup (void) {
170 #if (__ARM_ARCH_7A__ == 0U)
171 /* Service Call interrupt might be configured before kernel start */
172 /* and when its priority is lower or equal to BASEPRI, svc intruction */
173 /* causes a Hard Fault. */
174 NVIC_SetPriority (SVCall_IRQn, 0U);
/*
  Function macro used to retrieve semaphore count from ISR.
  Provided here as a fallback for FreeRTOS versions that do not define it;
  a semaphore is a queue, so its count is the number of "messages" waiting.
*/
#ifndef uxSemaphoreGetCountFromISR
#define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
#endif
186 Determine if CPU executes from interrupt context or if interrupts are masked.
188 __STATIC_INLINE uint32_t IRQ_Context (void) {
195 /* Called from interrupt context */
199 /* Get FreeRTOS scheduler state */
200 state = xTaskGetSchedulerState();
202 if (state != taskSCHEDULER_NOT_STARTED) {
203 /* Scheduler was started */
204 if (IS_IRQ_MASKED()) {
205 /* Interrupts are masked */
214 /*---------------------------------------------------------------------------*/
216 osStatus_t osKernelInitialize (void) {
219 if (IRQ_Context() != 0U) {
223 if (KernelState == osKernelInactive) {
224 #if defined(USE_TRACE_EVENT_RECORDER)
225 EvrFreeRTOSSetup(0U);
227 #if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
228 vPortDefineHeapRegions (configHEAP_5_REGIONS);
230 KernelState = osKernelReady;
240 osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
242 if (version != NULL) {
243 /* Version encoding is major.minor.rev: mmnnnrrrr dec */
244 version->api = KERNEL_VERSION;
245 version->kernel = KERNEL_VERSION;
248 if ((id_buf != NULL) && (id_size != 0U)) {
249 if (id_size > sizeof(KERNEL_ID)) {
250 id_size = sizeof(KERNEL_ID);
252 memcpy(id_buf, KERNEL_ID, id_size);
258 osKernelState_t osKernelGetState (void) {
259 osKernelState_t state;
261 switch (xTaskGetSchedulerState()) {
262 case taskSCHEDULER_RUNNING:
263 state = osKernelRunning;
266 case taskSCHEDULER_SUSPENDED:
267 state = osKernelLocked;
270 case taskSCHEDULER_NOT_STARTED:
272 if (KernelState == osKernelReady) {
273 state = osKernelReady;
275 state = osKernelInactive;
283 osStatus_t osKernelStart (void) {
286 if (IRQ_Context() != 0U) {
290 if (KernelState == osKernelReady) {
291 /* Ensure SVC priority is at the reset value */
293 /* Change state to enable IRQ masking check */
294 KernelState = osKernelRunning;
295 /* Start the kernel scheduler */
296 vTaskStartScheduler();
306 int32_t osKernelLock (void) {
309 if (IRQ_Context() != 0U) {
310 lock = (int32_t)osErrorISR;
313 switch (xTaskGetSchedulerState()) {
314 case taskSCHEDULER_SUSPENDED:
318 case taskSCHEDULER_RUNNING:
323 case taskSCHEDULER_NOT_STARTED:
325 lock = (int32_t)osError;
333 int32_t osKernelUnlock (void) {
336 if (IRQ_Context() != 0U) {
337 lock = (int32_t)osErrorISR;
340 switch (xTaskGetSchedulerState()) {
341 case taskSCHEDULER_SUSPENDED:
344 if (xTaskResumeAll() != pdTRUE) {
345 if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
346 lock = (int32_t)osError;
351 case taskSCHEDULER_RUNNING:
355 case taskSCHEDULER_NOT_STARTED:
357 lock = (int32_t)osError;
365 int32_t osKernelRestoreLock (int32_t lock) {
367 if (IRQ_Context() != 0U) {
368 lock = (int32_t)osErrorISR;
371 switch (xTaskGetSchedulerState()) {
372 case taskSCHEDULER_SUSPENDED:
373 case taskSCHEDULER_RUNNING:
379 lock = (int32_t)osError;
382 if (xTaskResumeAll() != pdTRUE) {
383 if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
384 lock = (int32_t)osError;
391 case taskSCHEDULER_NOT_STARTED:
393 lock = (int32_t)osError;
401 uint32_t osKernelGetTickCount (void) {
404 if (IRQ_Context() != 0U) {
405 ticks = xTaskGetTickCountFromISR();
407 ticks = xTaskGetTickCount();
413 uint32_t osKernelGetTickFreq (void) {
414 return (configTICK_RATE_HZ);
417 uint32_t osKernelGetSysTimerCount (void) {
418 uint32_t irqmask = IS_IRQ_MASKED();
424 ticks = xTaskGetTickCount();
425 val = OS_Tick_GetCount();
427 if (OS_Tick_GetOverflow() != 0U) {
428 val = OS_Tick_GetCount();
431 val += ticks * OS_Tick_GetInterval();
440 uint32_t osKernelGetSysTimerFreq (void) {
441 return (configCPU_CLOCK_HZ);
444 /*---------------------------------------------------------------------------*/
446 osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
455 if ((IRQ_Context() == 0U) && (func != NULL)) {
456 stack = configMINIMAL_STACK_SIZE;
457 prio = (UBaseType_t)osPriorityNormal;
463 if (attr->name != NULL) {
466 if (attr->priority != osPriorityNone) {
467 prio = (UBaseType_t)attr->priority;
470 if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
474 if (attr->stack_size > 0U) {
475 /* In FreeRTOS stack is not in bytes, but in sizeof(StackType_t) which is 4 on ARM ports. */
476 /* Stack size should be therefore 4 byte aligned in order to avoid division caused side effects */
477 stack = attr->stack_size / sizeof(StackType_t);
480 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
481 (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
485 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
495 #if (configSUPPORT_STATIC_ALLOCATION == 1)
496 hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio, (StackType_t *)attr->stack_mem,
497 (StaticTask_t *)attr->cb_mem);
502 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
503 if (xTaskCreate ((TaskFunction_t)func, name, (uint16_t)stack, argument, prio, &hTask) != pdPASS) {
511 return ((osThreadId_t)hTask);
514 const char *osThreadGetName (osThreadId_t thread_id) {
515 TaskHandle_t hTask = (TaskHandle_t)thread_id;
518 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
521 name = pcTaskGetName (hTask);
527 osThreadId_t osThreadGetId (void) {
530 id = (osThreadId_t)xTaskGetCurrentTaskHandle();
535 osThreadState_t osThreadGetState (osThreadId_t thread_id) {
536 TaskHandle_t hTask = (TaskHandle_t)thread_id;
537 osThreadState_t state;
539 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
540 state = osThreadError;
543 switch (eTaskGetState (hTask)) {
544 case eRunning: state = osThreadRunning; break;
545 case eReady: state = osThreadReady; break;
547 case eSuspended: state = osThreadBlocked; break;
548 case eDeleted: state = osThreadTerminated; break;
550 default: state = osThreadError; break;
557 uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
558 TaskHandle_t hTask = (TaskHandle_t)thread_id;
561 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
564 sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
570 osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
571 TaskHandle_t hTask = (TaskHandle_t)thread_id;
574 if (IRQ_Context() != 0U) {
577 else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
578 stat = osErrorParameter;
582 vTaskPrioritySet (hTask, (UBaseType_t)priority);
588 osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
589 TaskHandle_t hTask = (TaskHandle_t)thread_id;
592 if ((IRQ_Context() != 0U) || (hTask == NULL)) {
593 prio = osPriorityError;
595 prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
601 osStatus_t osThreadYield (void) {
604 if (IRQ_Context() != 0U) {
614 #if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
615 osStatus_t osThreadSuspend (osThreadId_t thread_id) {
616 TaskHandle_t hTask = (TaskHandle_t)thread_id;
619 if (IRQ_Context() != 0U) {
622 else if (hTask == NULL) {
623 stat = osErrorParameter;
627 vTaskSuspend (hTask);
633 osStatus_t osThreadResume (osThreadId_t thread_id) {
634 TaskHandle_t hTask = (TaskHandle_t)thread_id;
637 if (IRQ_Context() != 0U) {
640 else if (hTask == NULL) {
641 stat = osErrorParameter;
650 #endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */
652 __NO_RETURN void osThreadExit (void) {
653 #ifndef USE_FreeRTOS_HEAP_1
659 osStatus_t osThreadTerminate (osThreadId_t thread_id) {
660 TaskHandle_t hTask = (TaskHandle_t)thread_id;
662 #ifndef USE_FreeRTOS_HEAP_1
665 if (IRQ_Context() != 0U) {
668 else if (hTask == NULL) {
669 stat = osErrorParameter;
672 tstate = eTaskGetState (hTask);
674 if (tstate != eDeleted) {
678 stat = osErrorResource;
688 uint32_t osThreadGetCount (void) {
691 if (IRQ_Context() != 0U) {
694 count = uxTaskGetNumberOfTasks();
700 #if (configUSE_OS2_THREAD_ENUMERATE == 1)
701 uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
705 if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
710 count = uxTaskGetNumberOfTasks();
711 task = pvPortMalloc (count * sizeof(TaskStatus_t));
714 count = uxTaskGetSystemState (task, count, NULL);
716 for (i = 0U; (i < count) && (i < array_items); i++) {
717 thread_array[i] = (osThreadId_t)task[i].xHandle;
721 (void)xTaskResumeAll();
728 #endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */
730 #if (configUSE_OS2_THREAD_FLAGS == 1)
731 uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
732 TaskHandle_t hTask = (TaskHandle_t)thread_id;
736 if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
737 rflags = (uint32_t)osErrorParameter;
740 rflags = (uint32_t)osError;
742 if (IRQ_Context() != 0U) {
745 (void)xTaskNotifyFromISR (hTask, flags, eSetBits, &yield);
746 (void)xTaskNotifyAndQueryFromISR (hTask, 0, eNoAction, &rflags, NULL);
748 portYIELD_FROM_ISR (yield);
751 (void)xTaskNotify (hTask, flags, eSetBits);
752 (void)xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags);
755 /* Return flags after setting */
759 uint32_t osThreadFlagsClear (uint32_t flags) {
761 uint32_t rflags, cflags;
763 if (IRQ_Context() != 0U) {
764 rflags = (uint32_t)osErrorISR;
766 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
767 rflags = (uint32_t)osErrorParameter;
770 hTask = xTaskGetCurrentTaskHandle();
772 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &cflags) == pdPASS) {
776 if (xTaskNotify (hTask, cflags, eSetValueWithOverwrite) != pdPASS) {
777 rflags = (uint32_t)osError;
781 rflags = (uint32_t)osError;
785 /* Return flags before clearing */
789 uint32_t osThreadFlagsGet (void) {
793 if (IRQ_Context() != 0U) {
794 rflags = (uint32_t)osErrorISR;
797 hTask = xTaskGetCurrentTaskHandle();
799 if (xTaskNotifyAndQuery (hTask, 0, eNoAction, &rflags) != pdPASS) {
800 rflags = (uint32_t)osError;
807 uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
808 uint32_t rflags, nval;
810 TickType_t t0, td, tout;
813 if (IRQ_Context() != 0U) {
814 rflags = (uint32_t)osErrorISR;
816 else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
817 rflags = (uint32_t)osErrorParameter;
820 if ((options & osFlagsNoClear) == osFlagsNoClear) {
829 t0 = xTaskGetTickCount();
831 rval = xTaskNotifyWait (0, clear, &nval, tout);
833 if (rval == pdPASS) {
837 if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
838 if ((flags & rflags) == flags) {
842 rflags = (uint32_t)osErrorResource;
848 if ((flags & rflags) != 0) {
852 rflags = (uint32_t)osErrorResource;
859 td = xTaskGetTickCount() - t0;
869 rflags = (uint32_t)osErrorResource;
871 rflags = (uint32_t)osErrorTimeout;
875 while (rval != pdFAIL);
878 /* Return flags before clearing */
881 #endif /* (configUSE_OS2_THREAD_FLAGS == 1) */
883 osStatus_t osDelay (uint32_t ticks) {
886 if (IRQ_Context() != 0U) {
900 osStatus_t osDelayUntil (uint32_t ticks) {
901 TickType_t tcnt, delay;
904 if (IRQ_Context() != 0U) {
909 tcnt = xTaskGetTickCount();
911 /* Determine remaining number of ticks to delay */
912 delay = (TickType_t)ticks - tcnt;
914 /* Check if target tick has not expired */
915 if((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
916 vTaskDelayUntil (&tcnt, delay);
920 /* No delay or already expired */
921 stat = osErrorParameter;
928 /*---------------------------------------------------------------------------*/
929 #if (configUSE_OS2_TIMER == 1)
931 static void TimerCallback (TimerHandle_t hTimer) {
932 TimerCallback_t *callb;
934 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
937 callb->func (callb->arg);
941 osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
943 TimerHandle_t hTimer;
944 TimerCallback_t *callb;
950 if ((IRQ_Context() == 0U) && (func != NULL)) {
951 /* Allocate memory to store callback function and argument */
952 callb = pvPortMalloc (sizeof(TimerCallback_t));
956 callb->arg = argument;
958 if (type == osTimerOnce) {
968 if (attr->name != NULL) {
972 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
976 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
986 #if (configSUPPORT_STATIC_ALLOCATION == 1)
987 hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
992 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
993 hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
998 if ((hTimer == NULL) && (callb != NULL)) {
1004 return ((osTimerId_t)hTimer);
1007 const char *osTimerGetName (osTimerId_t timer_id) {
1008 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1011 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1014 p = pcTimerGetName (hTimer);
1020 osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
1021 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1024 if (IRQ_Context() != 0U) {
1027 else if (hTimer == NULL) {
1028 stat = osErrorParameter;
1031 if (xTimerChangePeriod (hTimer, ticks, 0) == pdPASS) {
1034 stat = osErrorResource;
1041 osStatus_t osTimerStop (osTimerId_t timer_id) {
1042 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1045 if (IRQ_Context() != 0U) {
1048 else if (hTimer == NULL) {
1049 stat = osErrorParameter;
1052 if (xTimerIsTimerActive (hTimer) == pdFALSE) {
1053 stat = osErrorResource;
1056 if (xTimerStop (hTimer, 0) == pdPASS) {
1067 uint32_t osTimerIsRunning (osTimerId_t timer_id) {
1068 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1071 if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
1074 running = (uint32_t)xTimerIsTimerActive (hTimer);
1080 osStatus_t osTimerDelete (osTimerId_t timer_id) {
1081 TimerHandle_t hTimer = (TimerHandle_t)timer_id;
1083 #ifndef USE_FreeRTOS_HEAP_1
1084 TimerCallback_t *callb;
1086 if (IRQ_Context() != 0U) {
1089 else if (hTimer == NULL) {
1090 stat = osErrorParameter;
1093 callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
1095 if (xTimerDelete (hTimer, 0) == pdPASS) {
1099 stat = osErrorResource;
1108 #endif /* (configUSE_OS2_TIMER == 1) */
1110 /*---------------------------------------------------------------------------*/
1112 osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
1113 EventGroupHandle_t hEventGroup;
1118 if (IRQ_Context() == 0U) {
1122 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
1126 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1136 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1137 hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
1142 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1143 hEventGroup = xEventGroupCreate();
1149 return ((osEventFlagsId_t)hEventGroup);
1152 uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
1153 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1157 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1158 rflags = (uint32_t)osErrorParameter;
1160 else if (IRQ_Context() != 0U) {
1161 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1163 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1164 rflags = (uint32_t)osErrorResource;
1168 if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
1169 rflags = (uint32_t)osErrorResource;
1172 portYIELD_FROM_ISR (yield);
1177 rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
1183 uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
1184 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1187 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1188 rflags = (uint32_t)osErrorParameter;
1190 else if (IRQ_Context() != 0U) {
1191 #if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
1192 /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
1193 rflags = (uint32_t)osErrorResource;
1195 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1197 if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
1198 rflags = (uint32_t)osErrorResource;
1203 rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
1209 uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
1210 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1213 if (ef_id == NULL) {
1216 else if (IRQ_Context() != 0U) {
1217 rflags = xEventGroupGetBitsFromISR (hEventGroup);
1220 rflags = xEventGroupGetBits (hEventGroup);
1226 uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
1227 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1228 BaseType_t wait_all;
1229 BaseType_t exit_clr;
1232 if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
1233 rflags = (uint32_t)osErrorParameter;
1235 else if (IRQ_Context() != 0U) {
1236 rflags = (uint32_t)osErrorISR;
1239 if (options & osFlagsWaitAll) {
1245 if (options & osFlagsNoClear) {
1251 rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);
1253 if (options & osFlagsWaitAll) {
1254 if ((flags & rflags) != flags) {
1256 rflags = (uint32_t)osErrorTimeout;
1258 rflags = (uint32_t)osErrorResource;
1263 if ((flags & rflags) == 0U) {
1265 rflags = (uint32_t)osErrorTimeout;
1267 rflags = (uint32_t)osErrorResource;
1276 osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
1277 EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
1280 #ifndef USE_FreeRTOS_HEAP_1
1281 if (IRQ_Context() != 0U) {
1284 else if (hEventGroup == NULL) {
1285 stat = osErrorParameter;
1289 vEventGroupDelete (hEventGroup);
1298 /*---------------------------------------------------------------------------*/
1299 #if (configUSE_OS2_MUTEX == 1)
1301 osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
1302 SemaphoreHandle_t hMutex;
1306 #if (configQUEUE_REGISTRY_SIZE > 0)
1312 if (IRQ_Context() == 0U) {
1314 type = attr->attr_bits;
1319 if ((type & osMutexRecursive) == osMutexRecursive) {
1325 if ((type & osMutexRobust) != osMutexRobust) {
1329 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1333 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1343 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1345 #if (configUSE_RECURSIVE_MUTEXES == 1)
1346 hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
1350 hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
1356 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1358 #if (configUSE_RECURSIVE_MUTEXES == 1)
1359 hMutex = xSemaphoreCreateRecursiveMutex ();
1362 hMutex = xSemaphoreCreateMutex ();
1368 #if (configQUEUE_REGISTRY_SIZE > 0)
1369 if (hMutex != NULL) {
1375 vQueueAddToRegistry (hMutex, name);
1379 if ((hMutex != NULL) && (rmtx != 0U)) {
1380 hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
1385 return ((osMutexId_t)hMutex);
1388 osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
1389 SemaphoreHandle_t hMutex;
1393 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1395 rmtx = (uint32_t)mutex_id & 1U;
1399 if (IRQ_Context() != 0U) {
1402 else if (hMutex == NULL) {
1403 stat = osErrorParameter;
1407 #if (configUSE_RECURSIVE_MUTEXES == 1)
1408 if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
1409 if (timeout != 0U) {
1410 stat = osErrorTimeout;
1412 stat = osErrorResource;
1418 if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
1419 if (timeout != 0U) {
1420 stat = osErrorTimeout;
1422 stat = osErrorResource;
1431 osStatus_t osMutexRelease (osMutexId_t mutex_id) {
1432 SemaphoreHandle_t hMutex;
1436 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1438 rmtx = (uint32_t)mutex_id & 1U;
1442 if (IRQ_Context() != 0U) {
1445 else if (hMutex == NULL) {
1446 stat = osErrorParameter;
1450 #if (configUSE_RECURSIVE_MUTEXES == 1)
1451 if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
1452 stat = osErrorResource;
1457 if (xSemaphoreGive (hMutex) != pdPASS) {
1458 stat = osErrorResource;
1466 osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
1467 SemaphoreHandle_t hMutex;
1470 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1472 if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
1475 owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
1481 osStatus_t osMutexDelete (osMutexId_t mutex_id) {
1483 #ifndef USE_FreeRTOS_HEAP_1
1484 SemaphoreHandle_t hMutex;
1486 hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
1488 if (IRQ_Context() != 0U) {
1491 else if (hMutex == NULL) {
1492 stat = osErrorParameter;
1495 #if (configQUEUE_REGISTRY_SIZE > 0)
1496 vQueueUnregisterQueue (hMutex);
1499 vSemaphoreDelete (hMutex);
1507 #endif /* (configUSE_OS2_MUTEX == 1) */
1509 /*---------------------------------------------------------------------------*/
1511 osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
1512 SemaphoreHandle_t hSemaphore;
1514 #if (configQUEUE_REGISTRY_SIZE > 0)
1520 if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
1524 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
1528 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1538 if (max_count == 1U) {
1540 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1541 hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
1545 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1546 hSemaphore = xSemaphoreCreateBinary();
1550 if ((hSemaphore != NULL) && (initial_count != 0U)) {
1551 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1552 vSemaphoreDelete (hSemaphore);
1559 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1560 hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
1564 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1565 hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
1570 #if (configQUEUE_REGISTRY_SIZE > 0)
1571 if (hSemaphore != NULL) {
1577 vQueueAddToRegistry (hSemaphore, name);
1583 return ((osSemaphoreId_t)hSemaphore);
1586 osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
1587 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1593 if (hSemaphore == NULL) {
1594 stat = osErrorParameter;
1596 else if (IRQ_Context() != 0U) {
1597 if (timeout != 0U) {
1598 stat = osErrorParameter;
1603 if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
1604 stat = osErrorResource;
1606 portYIELD_FROM_ISR (yield);
1611 if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
1612 if (timeout != 0U) {
1613 stat = osErrorTimeout;
1615 stat = osErrorResource;
1623 osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
1624 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1630 if (hSemaphore == NULL) {
1631 stat = osErrorParameter;
1633 else if (IRQ_Context() != 0U) {
1636 if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
1637 stat = osErrorResource;
1639 portYIELD_FROM_ISR (yield);
1643 if (xSemaphoreGive (hSemaphore) != pdPASS) {
1644 stat = osErrorResource;
1651 uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
1652 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1655 if (hSemaphore == NULL) {
1658 else if (IRQ_Context() != 0U) {
1659 count = uxQueueMessagesWaitingFromISR (hSemaphore);
1661 count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
1667 osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
1668 SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
1671 #ifndef USE_FreeRTOS_HEAP_1
1672 if (IRQ_Context() != 0U) {
1675 else if (hSemaphore == NULL) {
1676 stat = osErrorParameter;
1679 #if (configQUEUE_REGISTRY_SIZE > 0)
1680 vQueueUnregisterQueue (hSemaphore);
1684 vSemaphoreDelete (hSemaphore);
1693 /*---------------------------------------------------------------------------*/
1695 osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
1696 QueueHandle_t hQueue;
1698 #if (configQUEUE_REGISTRY_SIZE > 0)
1704 if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
1708 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
1709 (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
1713 if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
1714 (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
1724 #if (configSUPPORT_STATIC_ALLOCATION == 1)
1725 hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
1730 #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
1731 hQueue = xQueueCreate (msg_count, msg_size);
1736 #if (configQUEUE_REGISTRY_SIZE > 0)
1737 if (hQueue != NULL) {
1743 vQueueAddToRegistry (hQueue, name);
1749 return ((osMessageQueueId_t)hQueue);
1752 osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
1753 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1757 (void)msg_prio; /* Message priority is ignored */
1761 if (IRQ_Context() != 0U) {
1762 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
1763 stat = osErrorParameter;
1768 if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
1769 stat = osErrorResource;
1771 portYIELD_FROM_ISR (yield);
1776 if ((hQueue == NULL) || (msg_ptr == NULL)) {
1777 stat = osErrorParameter;
1780 if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
1781 if (timeout != 0U) {
1782 stat = osErrorTimeout;
1784 stat = osErrorResource;
1793 osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
1794 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1798 (void)msg_prio; /* Message priority is ignored */
1802 if (IRQ_Context() != 0U) {
1803 if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
1804 stat = osErrorParameter;
1809 if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
1810 stat = osErrorResource;
1812 portYIELD_FROM_ISR (yield);
1817 if ((hQueue == NULL) || (msg_ptr == NULL)) {
1818 stat = osErrorParameter;
1821 if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
1822 if (timeout != 0U) {
1823 stat = osErrorTimeout;
1825 stat = osErrorResource;
1834 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
1835 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1841 /* capacity = pxQueue->uxLength */
1842 capacity = mq->uxDummy4[1];
1848 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
1849 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1855 /* size = pxQueue->uxItemSize */
1856 size = mq->uxDummy4[2];
1862 uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
1863 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1866 if (hQueue == NULL) {
1869 else if (IRQ_Context() != 0U) {
1870 count = uxQueueMessagesWaitingFromISR (hQueue);
1873 count = uxQueueMessagesWaiting (hQueue);
1876 return ((uint32_t)count);
1879 uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
1880 StaticQueue_t *mq = (StaticQueue_t *)mq_id;
1887 else if (IRQ_Context() != 0U) {
1888 isrm = taskENTER_CRITICAL_FROM_ISR();
1890 /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
1891 space = mq->uxDummy4[1] - mq->uxDummy4[0];
1893 taskEXIT_CRITICAL_FROM_ISR(isrm);
1896 space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
1902 osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
1903 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1906 if (IRQ_Context() != 0U) {
1909 else if (hQueue == NULL) {
1910 stat = osErrorParameter;
1914 (void)xQueueReset (hQueue);
1920 osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
1921 QueueHandle_t hQueue = (QueueHandle_t)mq_id;
1924 #ifndef USE_FreeRTOS_HEAP_1
1925 if (IRQ_Context() != 0U) {
1928 else if (hQueue == NULL) {
1929 stat = osErrorParameter;
1932 #if (configQUEUE_REGISTRY_SIZE > 0)
1933 vQueueUnregisterQueue (hQueue);
1937 vQueueDelete (hQueue);
1946 /*---------------------------------------------------------------------------*/
1947 #ifdef FREERTOS_MPOOL_H_
1949 /* Static memory pool functions */
1950 static void FreeBlock (MemPool_t *mp, void *block);
1951 static void *AllocBlock (MemPool_t *mp);
1952 static void *CreateBlock (MemPool_t *mp);
1954 osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
1957 int32_t mem_cb, mem_mp;
1960 if (IRQ_Context() != 0U) {
1963 else if ((block_count == 0U) || (block_size == 0U)) {
1968 sz = MEMPOOL_ARR_SIZE (block_count, block_size);
1975 if (attr->name != NULL) {
1979 if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
1980 /* Static control block is provided */
1983 else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
1984 /* Allocate control block memory on heap */
1988 if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
1989 /* Allocate memory array on heap */
1993 if (attr->mp_mem != NULL) {
1994 /* Check if array is 4-byte aligned */
1995 if (((uint32_t)attr->mp_mem & 3U) == 0U) {
1996 /* Check if array big enough */
1997 if (attr->mp_size >= sz) {
1998 /* Static memory pool array is provided */
2006 /* Attributes not provided, allocate memory on heap */
2012 mp = pvPortMalloc (sizeof(MemPool_t));
2018 /* Create a semaphore (max count == initial count == block_count) */
2019 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2020 mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
2021 #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
2022 mp->sem = xSemaphoreCreateCounting (block_count, block_count);
2027 if (mp->sem != NULL) {
2028 /* Setup memory array */
2030 mp->mem_arr = pvPortMalloc (sz);
2032 mp->mem_arr = attr->mp_mem;
2037 if ((mp != NULL) && (mp->mem_arr != NULL)) {
2038 /* Memory pool can be created */
2042 mp->bl_sz = block_size;
2043 mp->bl_cnt = block_count;
2046 /* Set heap allocated memory flags */
2047 mp->status = MPOOL_STATUS;
2050 /* Control block on heap */
2054 /* Memory array on heap */
2059 /* Memory pool cannot be created, release allocated resources */
2060 if ((mem_cb == 0) && (mp != NULL)) {
2061 /* Free control block memory */
2071 const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
2072 MemPool_t *mp = (osMemoryPoolId_t)mp_id;
2075 if (IRQ_Context() != 0U) {
2078 else if (mp_id == NULL) {
2088 void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
2093 if (mp_id == NULL) {
2094 /* Invalid input parameters */
2100 mp = (MemPool_t *)mp_id;
2102 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2103 if (IRQ_Context() != 0U) {
2104 if (timeout == 0U) {
2105 if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
2106 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2107 isrm = taskENTER_CRITICAL_FROM_ISR();
2109 /* Get a block from the free-list */
2110 block = AllocBlock(mp);
2112 if (block == NULL) {
2113 /* List of free blocks is empty, 'create' new block */
2114 block = CreateBlock(mp);
2117 taskEXIT_CRITICAL_FROM_ISR(isrm);
2123 if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
2124 if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
2125 taskENTER_CRITICAL();
2127 /* Get a block from the free-list */
2128 block = AllocBlock(mp);
2130 if (block == NULL) {
2131 /* List of free blocks is empty, 'create' new block */
2132 block = CreateBlock(mp);
2135 taskEXIT_CRITICAL();
2145 osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
2151 if ((mp_id == NULL) || (block == NULL)) {
2152 /* Invalid input parameters */
2153 stat = osErrorParameter;
2156 mp = (MemPool_t *)mp_id;
2158 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2159 /* Invalid object status */
2160 stat = osErrorResource;
2162 else if ((block < (void *)&mp->mem_arr[0]) || (block > (void*)&mp->mem_arr[mp->mem_sz-1])) {
2163 /* Block pointer outside of memory array area */
2164 stat = osErrorParameter;
2169 if (IRQ_Context() != 0U) {
2170 if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
2171 stat = osErrorResource;
2174 isrm = taskENTER_CRITICAL_FROM_ISR();
2176 /* Add block to the list of free blocks */
2177 FreeBlock(mp, block);
2179 taskEXIT_CRITICAL_FROM_ISR(isrm);
2182 xSemaphoreGiveFromISR (mp->sem, &yield);
2183 portYIELD_FROM_ISR (yield);
2187 if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
2188 stat = osErrorResource;
2191 taskENTER_CRITICAL();
2193 /* Add block to the list of free blocks */
2194 FreeBlock(mp, block);
2196 taskEXIT_CRITICAL();
2198 xSemaphoreGive (mp->sem);
2207 uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
2211 if (mp_id == NULL) {
2212 /* Invalid input parameters */
2216 mp = (MemPool_t *)mp_id;
2218 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2219 /* Invalid object status */
2227 /* Return maximum number of memory blocks */
2231 uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
2235 if (mp_id == NULL) {
2236 /* Invalid input parameters */
2240 mp = (MemPool_t *)mp_id;
2242 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2243 /* Invalid object status */
2251 /* Return memory block size in bytes */
2255 uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
2259 if (mp_id == NULL) {
2260 /* Invalid input parameters */
2264 mp = (MemPool_t *)mp_id;
2266 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2267 /* Invalid object status */
2271 if (IRQ_Context() != 0U) {
2272 n = uxSemaphoreGetCountFromISR (mp->sem);
2274 n = uxSemaphoreGetCount (mp->sem);
2281 /* Return number of memory blocks used */
2285 uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
2289 if (mp_id == NULL) {
2290 /* Invalid input parameters */
2294 mp = (MemPool_t *)mp_id;
2296 if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
2297 /* Invalid object status */
2301 if (IRQ_Context() != 0U) {
2302 n = uxSemaphoreGetCountFromISR (mp->sem);
2304 n = uxSemaphoreGetCount (mp->sem);
2309 /* Return number of memory blocks available */
2313 osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
2317 if (mp_id == NULL) {
2318 /* Invalid input parameters */
2319 stat = osErrorParameter;
2321 else if (IRQ_Context() != 0U) {
2325 mp = (MemPool_t *)mp_id;
2327 taskENTER_CRITICAL();
2329 /* Invalidate control block status */
2330 mp->status = mp->status & 3U;
2332 /* Wake-up tasks waiting for pool semaphore */
2333 while (xSemaphoreGive (mp->sem) == pdTRUE);
2339 if ((mp->status & 2U) != 0U) {
2340 /* Memory pool array allocated on heap */
2341 vPortFree (mp->mem_arr);
2343 if ((mp->status & 1U) != 0U) {
2344 /* Memory pool control block allocated on heap */
2348 taskEXIT_CRITICAL();
2357 Create new block given according to the current block index.
2359 static void *CreateBlock (MemPool_t *mp) {
2360 MemPoolBlock_t *p = NULL;
2362 if (mp->n < mp->bl_cnt) {
2363 /* Unallocated blocks exist, set pointer to new block */
2364 p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));
2366 /* Increment block index */
2374 Allocate a block by reading the list of free blocks.
2376 static void *AllocBlock (MemPool_t *mp) {
2377 MemPoolBlock_t *p = NULL;
2379 if (mp->head != NULL) {
2380 /* List of free block exists, get head block */
2383 /* Head block is now next on the list */
2391 Free block by putting it to the list of free blocks.
2393 static void FreeBlock (MemPool_t *mp, void *block) {
2394 MemPoolBlock_t *p = block;
2396 /* Store current head into block memory space */
2399 /* Store current block as new head */
2402 #endif /* FREERTOS_MPOOL_H_ */
2403 /*---------------------------------------------------------------------------*/
2405 /* Callback function prototypes */
2406 extern void vApplicationIdleHook (void);
2407 extern void vApplicationTickHook (void);
2408 extern void vApplicationMallocFailedHook (void);
2409 extern void vApplicationDaemonTaskStartupHook (void);
2410 extern void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName);
/*
  Dummy implementation of the callback function vApplicationIdleHook().
  Applications override it by providing a non-weak definition.
*/
#if (configUSE_IDLE_HOOK == 1)
__WEAK void vApplicationIdleHook (void){}
#endif
/*
  Dummy implementation of the callback function vApplicationTickHook().
  Applications override it by providing a non-weak definition.
*/
#if (configUSE_TICK_HOOK == 1)
__WEAK void vApplicationTickHook (void){}
#endif
/*
  Dummy implementation of the callback function vApplicationMallocFailedHook().
  Applications override it by providing a non-weak definition.
*/
#if (configUSE_MALLOC_FAILED_HOOK == 1)
__WEAK void vApplicationMallocFailedHook (void){}
#endif
/*
  Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
  Applications override it by providing a non-weak definition.
*/
#if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
__WEAK void vApplicationDaemonTaskStartupHook (void){}
#endif
/*
  Dummy implementation of the callback function vApplicationStackOverflowHook().
  Applications override it by providing a non-weak definition.
*/
#if (configCHECK_FOR_STACK_OVERFLOW > 0)
__WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, signed char *pcTaskName) {
  (void)xTask;
  (void)pcTaskName;

  /* Assert when stack overflow checking is enabled but no application
     defined handler exists */
  configASSERT(0);
}
#endif
2451 /*---------------------------------------------------------------------------*/
2452 #if (configSUPPORT_STATIC_ALLOCATION == 1)
2453 /* External Idle and Timer task static memory allocation functions */
2454 extern void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize);
2455 extern void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize);
2458 vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2459 equals to 1 and is required for static memory allocation support.
2461 __WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
2462 /* Idle task control block and stack */
2463 static StaticTask_t Idle_TCB;
2464 static StackType_t Idle_Stack[configMINIMAL_STACK_SIZE];
2466 *ppxIdleTaskTCBBuffer = &Idle_TCB;
2467 *ppxIdleTaskStackBuffer = &Idle_Stack[0];
2468 *pulIdleTaskStackSize = (uint32_t)configMINIMAL_STACK_SIZE;
2472 vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
2473 equals to 1 and is required for static memory allocation support.
2475 __WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
2476 /* Timer task control block and stack */
2477 static StaticTask_t Timer_TCB;
2478 static StackType_t Timer_Stack[configTIMER_TASK_STACK_DEPTH];
2480 *ppxTimerTaskTCBBuffer = &Timer_TCB;
2481 *ppxTimerTaskStackBuffer = &Timer_Stack[0];
2482 *pulTimerTaskStackSize = (uint32_t)configTIMER_TASK_STACK_DEPTH;