/* tasks.c - FreeRTOS kernel task management implementation. */
/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

29 /* Standard includes. */
30 #include <stdlib.h>
31 #include <string.h>
32
33 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
34  * all the API functions to use the MPU wrappers.  That should only be done when
35  * task.h is included from an application file. */
36 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
37
38 /* FreeRTOS includes. */
39 #include "FreeRTOS.h"
40 #include "task.h"
41 #include "timers.h"
42 #include "stack_macros.h"
43
44 /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
45  * for the header files above, but not in this file, in order to generate the
46  * correct privileged Vs unprivileged linkage and placement. */
47 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
48
49 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
50  * functions but without including stdio.h here. */
51 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
52
53 /* At the bottom of this file are two optional functions that can be used
54  * to generate human readable text from the raw data generated by the
55  * uxTaskGetSystemState() function.  Note the formatting functions are provided
56  * for convenience only, and are NOT considered part of the kernel. */
57     #include <stdio.h>
58 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
59
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )
    #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )
#else

    #if ( configNUMBER_OF_CORES == 1 )

/* This macro requests the running task pxTCB to yield. In single core
 * scheduler, a running task always runs on core 0 and portYIELD_WITHIN_API()
 * can be used to request the task running on core 0 to yield. Therefore, pxTCB
 * is not used in this macro. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                         \
        ( void ) ( pxTCB );                                      \
        portYIELD_WITHIN_API();                                  \
    } while( 0 )

/* Yield only if the woken task pxTCB has a higher priority than the
 * currently running task. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                        \
        if( pxCurrentTCB->uxPriority < ( pxTCB )->uxPriority )  \
        {                                                       \
            portYIELD_WITHIN_API();                             \
        }                                                       \
        else                                                    \
        {                                                       \
            mtCOVERAGE_TEST_MARKER();                           \
        }                                                       \
    } while( 0 )

    #else /* if ( configNUMBER_OF_CORES == 1 ) */

/* Yield the core on which this task is running. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )    prvYieldCore( ( pxTCB )->xTaskRunState )

/* Yield for the task if a running task has priority lower than this task. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )     prvYieldForTask( pxTCB )

    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* if ( configUSE_PREEMPTION == 0 ) */

/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE                        ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif

/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to to
 * be global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME    "IDLE"
#endif

#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    do {                                            \
        if( ( uxPriority ) > uxTopReadyPriority )   \
        {                                           \
            uxTopReadyPriority = ( uxPriority );    \
        }                                           \
    } while( 0 ) /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

    #if ( configNUMBER_OF_CORES == 1 )
        #define taskSELECT_HIGHEST_PRIORITY_TASK()                            \
    do {                                                                      \
        UBaseType_t uxTopPriority = uxTopReadyPriority;                       \
                                                                              \
        /* Find the highest priority queue that contains ready tasks. */      \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
        {                                                                     \
            configASSERT( uxTopPriority );                                    \
            --uxTopPriority;                                                  \
        }                                                                     \
                                                                              \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
         * the  same priority get an equal share of the processor time. */                    \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
        uxTopReadyPriority = uxTopPriority;                                                   \
    } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

        #define taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID )    prvSelectHighestPriorityTask( xCoreID )

    #endif /* if ( configNUMBER_OF_CORES == 1 ) */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used. */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                  \
    do {                                                                                        \
        UBaseType_t uxTopPriority;                                                              \
                                                                                                \
        /* Find the highest priority list that contains ready tasks. */                         \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                          \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );   \
    } while( 0 )

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                     \
    do {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                              \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                        \
        }                                                                                              \
    } while( 0 )

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/*-----------------------------------------------------------*/

/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows. */
#define taskSWITCH_DELAYED_LISTS()                                                \
    do {                                                                          \
        List_t * pxTemp;                                                          \
                                                                                  \
        /* The delayed tasks list should be empty when the lists are switched. */ \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );               \
                                                                                  \
        pxTemp = pxDelayedTaskList;                                               \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                            \
        pxOverflowDelayedTaskList = pxTemp;                                       \
        xNumOfOverflows++;                                                        \
        prvResetNextTaskUnblockTime();                                            \
    } while( 0 )

/*-----------------------------------------------------------*/

/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )                                                                     \
    do {                                                                                                   \
        traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                           \
        taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                                \
        listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
        tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB );                                                      \
    } while( 0 )
/*-----------------------------------------------------------*/

/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )

/* The item value of the event list item is normally used to hold the priority
 * of the task to which it belongs (coded to allow it to be held in reverse
 * priority order).  However, it is occasionally borrowed for other purposes.  It
 * is important its value is not updated due to a task priority change while it is
 * being used for another purpose.  The following bit definition is used to inform
 * the scheduler that the value should not be changed - in which case it is the
 * responsibility of whichever module is using the value to ensure it gets set back
 * to its original value when it is released. */
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint16_t ) 0x8000U )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint32_t ) 0x80000000UL )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint64_t ) 0x8000000000000000ULL )
#endif

/* Indicates that the task is not actively running on any core. */
#define taskTASK_NOT_RUNNING           ( ( BaseType_t ) ( -1 ) )

/* Indicates that the task is actively running but scheduled to yield. */
#define taskTASK_SCHEDULED_TO_YIELD    ( ( BaseType_t ) ( -2 ) )

/* Returns pdTRUE if the task is actively running and not scheduled to yield. */
#if ( configNUMBER_OF_CORES == 1 )
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
#else
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB )->xTaskRunState != taskTASK_NOT_RUNNING ) ? ( pdTRUE ) : ( pdFALSE ) )
#endif

/* Indicates that the task is an Idle task. */
#define taskATTRIBUTE_IS_IDLE    ( UBaseType_t ) ( 1UL << 0UL )

#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) )
    #define portGET_CRITICAL_NESTING_COUNT()          ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting )
    #define portSET_CRITICAL_NESTING_COUNT( x )       ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) )
    #define portINCREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ )
    #define portDECREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- )
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */

#define taskBITS_PER_BYTE    ( ( size_t ) 8 )

#if ( configNUMBER_OF_CORES > 1 )

/* Yields the given core. This must be called from a critical section and xCoreID
 * must be valid. This macro is not required in single core since there is only
 * one core to yield. */
    #define prvYieldCore( xCoreID )                                                          \
    do {                                                                                     \
        if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() )                                \
        {                                                                                    \
            /* Pending a yield for this core since it is in the critical section. */         \
            xYieldPendings[ ( xCoreID ) ] = pdTRUE;                                          \
        }                                                                                    \
        else                                                                                 \
        {                                                                                    \
            /* Request other core to yield if it is not requested before. */                 \
            if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
            {                                                                                \
                portYIELD_CORE( xCoreID );                                                   \
                pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD;   \
            }                                                                                \
        }                                                                                    \
    } while( 0 )
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/

346 /*
347  * Task control block.  A task control block (TCB) is allocated for each task,
348  * and stores task state information, including a pointer to the task's context
349  * (the task's run time environment, including register values)
350  */
351 typedef struct tskTaskControlBlock       /* The old naming convention is used to prevent breaking kernel aware debuggers. */
352 {
353     volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
354
355     #if ( portUSING_MPU_WRAPPERS == 1 )
356         xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
357     #endif
358
359     #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
360         UBaseType_t uxCoreAffinityMask; /**< Used to link the task to certain cores.  UBaseType_t must have greater than or equal to the number of bits as configNUMBER_OF_CORES. */
361     #endif
362
363     ListItem_t xStateListItem;                  /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
364     ListItem_t xEventListItem;                  /**< Used to reference a task from an event list. */
365     UBaseType_t uxPriority;                     /**< The priority of the task.  0 is the lowest priority. */
366     StackType_t * pxStack;                      /**< Points to the start of the stack. */
367     #if ( configNUMBER_OF_CORES > 1 )
368         volatile BaseType_t xTaskRunState;      /**< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */
369         UBaseType_t uxTaskAttributes;           /**< Task's attributes - currently used to identify the idle tasks. */
370     #endif
371     char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created.  Facilitates debugging only. */
372
373     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
374         BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
375     #endif
376
377     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
378         StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
379     #endif
380
381     #if ( portCRITICAL_NESTING_IN_TCB == 1 )
382         UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
383     #endif
384
385     #if ( configUSE_TRACE_FACILITY == 1 )
386         UBaseType_t uxTCBNumber;  /**< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
387         UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
388     #endif
389
390     #if ( configUSE_MUTEXES == 1 )
391         UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
392         UBaseType_t uxMutexesHeld;
393     #endif
394
395     #if ( configUSE_APPLICATION_TASK_TAG == 1 )
396         TaskHookFunction_t pxTaskTag;
397     #endif
398
399     #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
400         void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
401     #endif
402
403     #if ( configGENERATE_RUN_TIME_STATS == 1 )
404         configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
405     #endif
406
407     #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
408         configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
409     #endif
410
411     #if ( configUSE_TASK_NOTIFICATIONS == 1 )
412         volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
413         volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
414     #endif
415
416     /* See the comments in FreeRTOS.h with the definition of
417      * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
418     #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
419         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
420     #endif
421
422     #if ( INCLUDE_xTaskAbortDelay == 1 )
423         uint8_t ucDelayAborted;
424     #endif
425
426     #if ( configUSE_POSIX_ERRNO == 1 )
427         int iTaskErrno;
428     #endif
429 } tskTCB;
430
431 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
432  * below to enable the use of older kernel aware debuggers. */
433 typedef tskTCB TCB_t;
434
435 #if ( configNUMBER_OF_CORES == 1 )
436     /* MISRA Ref 8.4.1 [Declaration shall be visible] */
437     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
438     /* coverity[misra_c_2012_rule_8_4_violation] */
439     portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
440 #else
441     /* MISRA Ref 8.4.1 [Declaration shall be visible] */
442     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
443     /* coverity[misra_c_2012_rule_8_4_violation] */
444     portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ];
445     #define pxCurrentTCB    xTaskGetCurrentTaskHandle()
446 #endif
447
448 /* Lists for ready and blocked tasks. --------------------
449  * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
450  * doing so breaks some kernel aware debuggers and debuggers that rely on removing
451  * the static qualifier. */
452 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
453 PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /**< Delayed tasks. */
454 PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
455 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /**< Points to the delayed task list currently being used. */
456 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
457 PRIVILEGED_DATA static List_t xPendingReadyList;                         /**< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */
458
459 #if ( INCLUDE_vTaskDelete == 1 )
460
461     PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
462     PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
463
464 #endif
465
466 #if ( INCLUDE_vTaskSuspend == 1 )
467
468     PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */
469
470 #endif
471
472 /* Global POSIX errno. Its value is changed upon context switching to match
473  * the errno of the currently running task. */
474 #if ( configUSE_POSIX_ERRNO == 1 )
475     int FreeRTOS_errno = 0;
476 #endif
477
478 /* Other file private variables. --------------------------------*/
479 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
480 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
481 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
482 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
483 PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
484 PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE };
485 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
486 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
487 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
488 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ];       /**< Holds the handles of the idle tasks.  The idle tasks are created automatically when the scheduler is started. */
489
490 /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists.
491  * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
492  * to determine the number of priority lists to read back from the remote target. */
493 static const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;
494
495 /* Context switches are held pending while the scheduler is suspended.  Also,
496  * interrupts must not manipulate the xStateListItem of a TCB, or any of the
497  * lists the xStateListItem can be referenced from, if the scheduler is suspended.
498  * If an interrupt needs to unblock a task while the scheduler is suspended then it
499  * moves the task's event list item into the xPendingReadyList, ready for the
500  * kernel to move the task from the pending ready list into the real ready list
501  * when the scheduler is unsuspended.  The pending ready list itself can only be
502  * accessed from a critical section.
503  *
504  * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock
505  * and must not be done from an ISR. Reads must be protected by either lock and may be done
506  * from either an ISR or a task. */
507 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;
508
509 #if ( configGENERATE_RUN_TIME_STATS == 1 )
510
511 /* Do not move these variables to function scope as doing so prevents the
512  * code working with debuggers that need to remove the static qualifier. */
513 PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U };    /**< Holds the value of a timer/counter the last time a task was switched in. */
514 PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */
515
516 #endif
517
518 /*-----------------------------------------------------------*/
519
520 /* File private functions. --------------------------------*/
521
522 /*
523  * Creates the idle tasks during scheduler start.
524  */
525 static BaseType_t prvCreateIdleTasks( void );
526
527 #if ( configNUMBER_OF_CORES > 1 )
528
529 /*
530  * Checks to see if another task moved the current task out of the ready
531  * list while it was waiting to enter a critical section and yields, if so.
532  */
533     static void prvCheckForRunStateChange( void );
534 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
535
536 #if ( configNUMBER_OF_CORES > 1 )
537
538 /*
539  * Yields a core, or cores if multiple priorities are not allowed to run
540  * simultaneously, to allow the task pxTCB to run.
541  */
542     static void prvYieldForTask( const TCB_t * pxTCB );
543 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
544
545 #if ( configNUMBER_OF_CORES > 1 )
546
547 /*
548  * Selects the highest priority available task for the given core.
549  */
550     static void prvSelectHighestPriorityTask( BaseType_t xCoreID );
551 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
552
553 /**
554  * Utility task that simply returns pdTRUE if the task referenced by xTask is
555  * currently in the Suspended state, or pdFALSE if the task referenced by xTask
556  * is in any other state.
557  */
558 #if ( INCLUDE_vTaskSuspend == 1 )
559
560     static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
561
562 #endif /* INCLUDE_vTaskSuspend */
563
564 /*
565  * Utility to ready all the lists used by the scheduler.  This is called
566  * automatically upon the creation of the first task.
567  */
568 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
569
570 /*
571  * The idle task, which as all tasks is implemented as a never ending loop.
572  * The idle task is automatically created and added to the ready lists upon
573  * creation of the first user task.
574  *
575  * In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks are also
576  * created to ensure that each core has an idle task to run when no other
577  * task is available to run.
578  *
579  * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
580  * language extensions.  The equivalent prototype for these functions are:
581  *
582  * void prvIdleTask( void *pvParameters );
583  * void prvPassiveIdleTask( void *pvParameters );
584  *
585  */
586 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
587 #if ( configNUMBER_OF_CORES > 1 )
588     static portTASK_FUNCTION_PROTO( prvPassiveIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
589 #endif
590
591 /*
592  * Utility to free all memory allocated by the scheduler to hold a TCB,
593  * including the stack pointed to by the TCB.
594  *
595  * This does not free memory allocated by the task itself (i.e. memory
596  * allocated by calls to pvPortMalloc from within the tasks application code).
597  */
598 #if ( INCLUDE_vTaskDelete == 1 )
599
600     static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
601
602 #endif
603
604 /*
605  * Used only by the idle task.  This checks to see if anything has been placed
606  * in the list of tasks waiting to be deleted.  If so the task is cleaned up
607  * and its TCB deleted.
608  */
609 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
610
611 /*
612  * The currently executing task is entering the Blocked state.  Add the task to
613  * either the current or the overflow delayed task list.
614  */
615 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
616                                             const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
617
618 /*
 * Fills a TaskStatus_t structure with information on each task that is
620  * referenced from the pxList list (which may be a ready list, a delayed list,
621  * a suspended list, etc.).
622  *
623  * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
624  * NORMAL APPLICATION CODE.
625  */
626 #if ( configUSE_TRACE_FACILITY == 1 )
627
628     static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
629                                                      List_t * pxList,
630                                                      eTaskState eState ) PRIVILEGED_FUNCTION;
631
632 #endif
633
634 /*
635  * Searches pxList for a task with name pcNameToQuery - returning a handle to
636  * the task if it is found, or NULL if the task is not found.
637  */
638 #if ( INCLUDE_xTaskGetHandle == 1 )
639
640     static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
641                                                      const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
642
643 #endif
644
645 /*
646  * When a task is created, the stack of the task is filled with a known value.
647  * This function determines the 'high water mark' of the task stack by
648  * determining how much of the stack remains at the original preset value.
649  */
650 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
651
652     static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
653
654 #endif
655
656 /*
657  * Return the amount of time, in ticks, that will pass before the kernel will
658  * next move a task from the Blocked state to the Running state.
659  *
660  * This conditional compilation should use inequality to 0, not equality to 1.
661  * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
662  * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
663  * set to a value other than 1.
664  */
665 #if ( configUSE_TICKLESS_IDLE != 0 )
666
667     static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
668
669 #endif
670
671 /*
672  * Set xNextTaskUnblockTime to the time at which the next Blocked state task
673  * will exit the Blocked state.
674  */
675 static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
676
677 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
678
679 /*
680  * Helper function used to pad task names with spaces when printing out
681  * human readable tables of task information.
682  */
683     static char * prvWriteNameToBuffer( char * pcBuffer,
684                                         const char * pcTaskName ) PRIVILEGED_FUNCTION;
685
686 #endif
687
688 /*
689  * Called after a Task_t structure has been allocated either statically or
690  * dynamically to fill in the structure's members.
691  */
692 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
693                                   const char * const pcName,
694                                   const uint32_t ulStackDepth,
695                                   void * const pvParameters,
696                                   UBaseType_t uxPriority,
697                                   TaskHandle_t * const pxCreatedTask,
698                                   TCB_t * pxNewTCB,
699                                   const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
700
701 /*
702  * Called after a new task has been created and initialised to place the task
703  * under the control of the scheduler.
704  */
705 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
706
707 /*
708  * Create a task with static buffer for both TCB and stack. Returns a handle to
709  * the task if it is created successfully. Otherwise, returns NULL.
710  */
711 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
712     static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
713                                         const char * const pcName,
714                                         const uint32_t ulStackDepth,
715                                         void * const pvParameters,
716                                         UBaseType_t uxPriority,
717                                         StackType_t * const puxStackBuffer,
718                                         StaticTask_t * const pxTaskBuffer,
719                                         TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
720 #endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
721
722 /*
723  * Create a restricted task with static buffer for both TCB and stack. Returns
724  * a handle to the task if it is created successfully. Otherwise, returns NULL.
725  */
726 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
727     static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
728                                                   TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
729 #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
730
731 /*
732  * Create a restricted task with static buffer for task stack and allocated buffer
733  * for TCB. Returns a handle to the task if it is created successfully. Otherwise,
734  * returns NULL.
735  */
736 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
737     static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
738                                             TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
739 #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
740
741 /*
742  * Create a task with allocated buffer for both TCB and stack. Returns a handle to
743  * the task if it is created successfully. Otherwise, returns NULL.
744  */
745 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
746     static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
747                                   const char * const pcName,
748                                   const configSTACK_DEPTH_TYPE usStackDepth,
749                                   void * const pvParameters,
750                                   UBaseType_t uxPriority,
751                                   TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
752 #endif /* #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
753
754 /*
755  * freertos_tasks_c_additions_init() should only be called if the user definable
756  * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
757  * called by the function.
758  */
759 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
760
761     static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
762
763 #endif
764
765 #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
766     extern void vApplicationPassiveIdleHook( void );
767 #endif /* #if ( configUSE_PASSIVE_IDLE_HOOK == 1 ) */
768
769 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
770
771 /*
772  * Convert the snprintf return value to the number of characters
773  * written. The following are the possible cases:
774  *
775  * 1. The buffer supplied to snprintf is large enough to hold the
776  *    generated string. The return value in this case is the number
777  *    of characters actually written, not counting the terminating
778  *    null character.
779  * 2. The buffer supplied to snprintf is NOT large enough to hold
780  *    the generated string. The return value in this case is the
781  *    number of characters that would have been written if the
782  *    buffer had been sufficiently large, not counting the
783  *    terminating null character.
784  * 3. Encoding error. The return value in this case is a negative
785  *    number.
786  *
787  * From 1 and 2 above ==> Only when the return value is non-negative
788  * and less than the supplied buffer length, the string has been
789  * completely written.
790  */
791     static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
792                                                         size_t n );
793
794 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
795 /*-----------------------------------------------------------*/
796
#if ( configNUMBER_OF_CORES > 1 )

/* Called (with interrupts disabled) when this core has just entered a critical
 * section or suspended the scheduler.  If another core marked the current task
 * as taskTASK_SCHEDULED_TO_YIELD in the meantime, temporarily drop the kernel
 * locks and re-enable interrupts so the pending yield can be serviced, then
 * reacquire everything and re-check.  Loops until the run state is settled. */
    static void prvCheckForRunStateChange( void )
    {
        UBaseType_t uxPrevCriticalNesting;
        const TCB_t * pxThisTCB;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        /* This function is always called with interrupts disabled
         * so this is safe. */
        pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];

        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
        {
            /* We are only here if we just entered a critical section
            * or if we just suspended the scheduler, and another task
            * has requested that we yield.
            *
            * This is slightly complicated since we need to save and restore
            * the suspension and critical nesting counts, as well as release
            * and reacquire the correct locks. And then, do it all over again
            * if our state changed again during the reacquisition. */
            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT();

            if( uxPrevCriticalNesting > 0U )
            {
                /* In a critical section: zero the nesting count and release the
                 * ISR lock so the count can be restored after the yield. */
                portSET_CRITICAL_NESTING_COUNT( 0U );
                portRELEASE_ISR_LOCK();
            }
            else
            {
                /* The scheduler is suspended. uxSchedulerSuspended is updated
                 * only when the task is not requested to yield. */
                mtCOVERAGE_TEST_MARKER();
            }

            portRELEASE_TASK_LOCK();
            portMEMORY_BARRIER();
            configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD );

            portENABLE_INTERRUPTS();

            /* Enabling interrupts should cause this core to immediately
             * service the pending interrupt and yield. If the run state is still
             * yielding here then that is a problem. */
            configASSERT( pxThisTCB->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD );

            /* Reacquire both kernel locks with interrupts disabled before
             * restoring the saved critical nesting count. */
            portDISABLE_INTERRUPTS();
            portGET_TASK_LOCK();
            portGET_ISR_LOCK();

            portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting );

            if( uxPrevCriticalNesting == 0U )
            {
                /* The caller was not in a critical section (scheduler-suspended
                 * path), so the ISR lock taken above must be released again. */
                portRELEASE_ISR_LOCK();
            }
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
858
859 /*-----------------------------------------------------------*/
860
#if ( configNUMBER_OF_CORES > 1 )

/* Request a yield on whichever running core is carrying the lowest priority
 * task that pxTCB is entitled to preempt.  Must be called from within a
 * critical section.
 *
 * NOTE: this function (like others in this file) uses the pattern of an
 * `#if`-selected `if( ... )` condition immediately followed by an unguarded
 * compound statement - the braces after the `#endif` are the body of
 * whichever condition was compiled in. */
    static void prvYieldForTask( const TCB_t * pxTCB )
    {
        BaseType_t xLowestPriorityToPreempt;
        BaseType_t xCurrentCoreTaskPriority;
        BaseType_t xLowestPriorityCore = ( BaseType_t ) -1;
        BaseType_t xCoreID;

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xYieldCount = 0;
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        /* This must be called from a critical section. */
        configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )

            /* No task should yield for this one if it is a lower priority
             * than priority level of currently ready tasks. */
            if( pxTCB->uxPriority >= uxTopReadyPriority )
        #else
            /* Yield is not required for a task which is already running. */
            if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
        #endif
        {
            xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority;

            /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB
             * is 0. This is ok as we will give system idle tasks a priority of -1 below. */
            --xLowestPriorityToPreempt;

            /* Scan every core for the lowest priority running task that pxTCB
             * may preempt. */
            for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
            {
                xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority;

                /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */
                if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                {
                    xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1;
                }

                /* Only consider cores that are actually running a task and do
                 * not already have a yield pending. */
                if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) )
                {
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
                    #endif
                    {
                        if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt )
                        {
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                            #endif
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    /* New best candidate for preemption. */
                                    xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                                    xLowestPriorityCore = xCoreID;
                                }
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* Yield all currently running non-idle tasks with a priority lower than
                         * the task that needs to run. */
                        if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) &&
                            ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) )
                        {
                            prvYieldCore( xCoreID );
                            xYieldCount++;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) )
            #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                if( xLowestPriorityCore >= 0 )
            #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
            {
                /* A single best-candidate core was found above - yield it. */
                prvYieldCore( xLowestPriorityCore );
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                /* Verify that the calling core always yields to higher priority tasks. */
                if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U ) &&
                    ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) )
                {
                    configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) ||
                                  ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) );
                }
            #endif
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
973 /*-----------------------------------------------------------*/
974
#if ( configNUMBER_OF_CORES > 1 )

/* Select the highest priority ready task that may run on core xCoreID and
 * swap it into pxCurrentTCBs[ xCoreID ].  Walks the ready lists from
 * uxTopReadyPriority downwards; with configUSE_CORE_AFFINITY the evicted task
 * is then offered to another eligible core. */
    static void prvSelectHighestPriorityTask( BaseType_t xCoreID )
    {
        UBaseType_t uxCurrentPriority = uxTopReadyPriority;
        BaseType_t xTaskScheduled = pdFALSE;
        BaseType_t xDecrementTopPriority = pdTRUE;

        #if ( configUSE_CORE_AFFINITY == 1 )
            const TCB_t * pxPreviousTCB = NULL;
        #endif
        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xPriorityDropped = pdFALSE;
        #endif

        /* This function should be called when scheduler is running. */
        configASSERT( xSchedulerRunning == pdTRUE );

        /* A new task is created and a running task with the same priority yields
         * itself to run the new task. When a running task yields itself, it is still
         * in the ready list. This running task will be selected before the new task
         * since the new task is always added to the end of the ready list.
         * The other problem is that the running task is still in the same position in
         * the ready list when it yields itself. It is possible that it will be selected
         * earlier than other tasks which have waited longer than this task.
         *
         * To fix these problems, the running task should be put to the end of the
         * ready list before searching for the ready task in the ready list. */
        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                                     &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE )
        {
            ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem );
            vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                            &pxCurrentTCBs[ xCoreID ]->xStateListItem );
        }

        /* Search downwards through the ready lists until a runnable task for
         * this core is found. */
        while( xTaskScheduled == pdFALSE )
        {
            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            {
                if( uxCurrentPriority < uxTopReadyPriority )
                {
                    /* We can't schedule any tasks, other than idle, that have a
                     * priority lower than the priority of a task currently running
                     * on another core. */
                    uxCurrentPriority = tskIDLE_PRIORITY;
                }
            }
            #endif

            if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
            {
                const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
                const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList );
                ListItem_t * pxIterator;

                /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority
                 * must not be decremented any further. */
                xDecrementTopPriority = pdFALSE;

                for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
                {
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator );

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* When falling back to the idle priority because only one priority
                         * level is allowed to run at a time, we should ONLY schedule the true
                         * idle tasks, not user tasks at the idle priority. */
                        if( uxCurrentPriority < uxTopReadyPriority )
                        {
                            if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U )
                            {
                                continue;
                            }
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

                    if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                    {
                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* If the task is not being executed by any core swap it in. */
                            pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
                            #endif
                            pxTCB->xTaskRunState = xCoreID;
                            pxCurrentTCBs[ xCoreID ] = pxTCB;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else if( pxTCB == pxCurrentTCBs[ xCoreID ] )
                    {
                        configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) );

                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* The task is already running on this core, mark it as scheduled. */
                            pxTCB->xTaskRunState = xCoreID;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else
                    {
                        /* This task is running on a core other than xCoreID. */
                        mtCOVERAGE_TEST_MARKER();
                    }

                    if( xTaskScheduled != pdFALSE )
                    {
                        /* A task has been selected to run on this core. */
                        break;
                    }
                }
            }
            else
            {
                if( xDecrementTopPriority != pdFALSE )
                {
                    /* Every list from uxTopReadyPriority down to here was
                     * empty, so the top ready priority can be lowered. */
                    uxTopReadyPriority--;
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        xPriorityDropped = pdTRUE;
                    }
                    #endif
                }
            }

            /* There are configNUMBER_OF_CORES Idle tasks created when scheduler started.
             * The scheduler should be able to select a task to run when uxCurrentPriority
             * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased below
             * tskIDLE_PRIORITY. */
            if( uxCurrentPriority > tskIDLE_PRIORITY )
            {
                uxCurrentPriority--;
            }
            else
            {
                /* Only reachable if this function is called before the idle tasks
                 * have been created. Break the loop to prevent uxCurrentPriority
                 * wrapping below tskIDLE_PRIORITY. */
                break;
            }
        }

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( xPriorityDropped != pdFALSE )
                {
                    /* There may be several ready tasks that were being prevented from running because there was
                     * a higher priority task running. Now that the last of the higher priority tasks is no longer
                     * running, make sure all the other idle tasks yield. */
                    BaseType_t x;

                    for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ )
                    {
                        if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                        {
                            prvYieldCore( x );
                        }
                    }
                }
            }
        }
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        #if ( configUSE_CORE_AFFINITY == 1 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) )
                {
                    /* A ready task was just evicted from this core. See if it can be
                     * scheduled on any other core. */
                    UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask;
                    BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority;
                    BaseType_t xLowestPriorityCore = -1;
                    BaseType_t x;

                    if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                    {
                        /* Idle tasks compare at priority tskIDLE_PRIORITY - 1. */
                        xLowestPriority = xLowestPriority - 1;
                    }

                    if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                    {
                        /* pxPreviousTCB was removed from this core and this core is not excluded
                         * from its core affinity mask.
                         *
                         * pxPreviousTCB is preempted by the new higher priority task
                         * pxCurrentTCBs[ xCoreID ]. When searching a new core for pxPreviousTCB,
                         * we do not need to look at the cores on which pxCurrentTCBs[ xCoreID ]
                         * is allowed to run. The reason is - when more than one core is
                         * eligible for an incoming task, we preempt the core with the minimum
                         * priority task. Because this core (i.e. xCoreID) was preempted for
                         * pxCurrentTCBs[ xCoreID ], this means that all the other cores
                         * where pxCurrentTCBs[ xCoreID ] can run, are running tasks with priority
                         * no lower than pxPreviousTCB's priority. Therefore, the only cores
                         * which can be preempted for pxPreviousTCB are the ones where
                         * pxCurrentTCBs[ xCoreID ] is not allowed to run (and obviously,
                         * pxPreviousTCB is allowed to run).
                         *
                         * This is an optimization which reduces the number of cores needed to be
                         * searched for pxPreviousTCB to run. */
                        uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask );
                    }
                    else
                    {
                        /* pxPreviousTCB's core affinity mask is changed and it is no longer
                         * allowed to run on this core. Searching all the cores in pxPreviousTCB's
                         * new core affinity mask to find a core on which it can run. */
                    }

                    /* Mask off bits beyond the number of physical cores. */
                    uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                    for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- )
                    {
                        UBaseType_t uxCore = ( UBaseType_t ) x;
                        BaseType_t xTaskPriority;

                        if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U )
                        {
                            xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority;

                            if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                            {
                                xTaskPriority = xTaskPriority - ( BaseType_t ) 1;
                            }

                            uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore );

                            if( ( xTaskPriority < xLowestPriority ) &&
                                ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) &&
                                ( xYieldPendings[ uxCore ] == pdFALSE ) )
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    xLowestPriority = xTaskPriority;
                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
                                }
                            }
                        }
                    }

                    if( xLowestPriorityCore >= 0 )
                    {
                        /* Found a core running a lower priority task - yield it
                         * so the evicted task can be scheduled there. */
                        prvYieldCore( xLowestPriorityCore );
                    }
                }
            }
        }
        #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */
    }

#endif /* ( configNUMBER_OF_CORES > 1 ) */
1241
1242 /*-----------------------------------------------------------*/
1243
1244 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
1245
1246     static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
1247                                         const char * const pcName,
1248                                         const uint32_t ulStackDepth,
1249                                         void * const pvParameters,
1250                                         UBaseType_t uxPriority,
1251                                         StackType_t * const puxStackBuffer,
1252                                         StaticTask_t * const pxTaskBuffer,
1253                                         TaskHandle_t * const pxCreatedTask )
1254     {
1255         TCB_t * pxNewTCB;
1256
1257         configASSERT( puxStackBuffer != NULL );
1258         configASSERT( pxTaskBuffer != NULL );
1259
1260         #if ( configASSERT_DEFINED == 1 )
1261         {
1262             /* Sanity check that the size of the structure used to declare a
1263              * variable of type StaticTask_t equals the size of the real task
1264              * structure. */
1265             volatile size_t xSize = sizeof( StaticTask_t );
1266             configASSERT( xSize == sizeof( TCB_t ) );
1267             ( void ) xSize; /* Prevent unused variable warning when configASSERT() is not used. */
1268         }
1269         #endif /* configASSERT_DEFINED */
1270
1271         if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
1272         {
1273             /* The memory used for the task's TCB and stack are passed into this
1274              * function - use them. */
1275             /* MISRA Ref 11.3.1 [Misaligned access] */
1276             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
1277             /* coverity[misra_c_2012_rule_11_3_violation] */
1278             pxNewTCB = ( TCB_t * ) pxTaskBuffer;
1279             ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1280             pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
1281
1282             #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
1283             {
1284                 /* Tasks can be created statically or dynamically, so note this
1285                  * task was created statically in case the task is later deleted. */
1286                 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
1287             }
1288             #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1289
1290             prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
1291         }
1292         else
1293         {
1294             pxNewTCB = NULL;
1295         }
1296
1297         return pxNewTCB;
1298     }
1299 /*-----------------------------------------------------------*/
1300
1301     TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
1302                                     const char * const pcName,
1303                                     const uint32_t ulStackDepth,
1304                                     void * const pvParameters,
1305                                     UBaseType_t uxPriority,
1306                                     StackType_t * const puxStackBuffer,
1307                                     StaticTask_t * const pxTaskBuffer )
1308     {
1309         TaskHandle_t xReturn = NULL;
1310         TCB_t * pxNewTCB;
1311
1312         traceENTER_xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
1313
1314         pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
1315
1316         if( pxNewTCB != NULL )
1317         {
1318             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1319             {
1320                 /* Set the task's affinity before scheduling it. */
1321                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1322             }
1323             #endif
1324
1325             prvAddNewTaskToReadyList( pxNewTCB );
1326         }
1327         else
1328         {
1329             mtCOVERAGE_TEST_MARKER();
1330         }
1331
1332         traceRETURN_xTaskCreateStatic( xReturn );
1333
1334         return xReturn;
1335     }
1336 /*-----------------------------------------------------------*/
1337
1338     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1339         TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode,
1340                                                    const char * const pcName,
1341                                                    const uint32_t ulStackDepth,
1342                                                    void * const pvParameters,
1343                                                    UBaseType_t uxPriority,
1344                                                    StackType_t * const puxStackBuffer,
1345                                                    StaticTask_t * const pxTaskBuffer,
1346                                                    UBaseType_t uxCoreAffinityMask )
1347         {
1348             TaskHandle_t xReturn = NULL;
1349             TCB_t * pxNewTCB;
1350
1351             traceENTER_xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, uxCoreAffinityMask );
1352
1353             pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
1354
1355             if( pxNewTCB != NULL )
1356             {
1357                 /* Set the task's affinity before scheduling it. */
1358                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1359
1360                 prvAddNewTaskToReadyList( pxNewTCB );
1361             }
1362             else
1363             {
1364                 mtCOVERAGE_TEST_MARKER();
1365             }
1366
1367             traceRETURN_xTaskCreateStaticAffinitySet( xReturn );
1368
1369             return xReturn;
1370         }
1371     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1372
1373 #endif /* SUPPORT_STATIC_ALLOCATION */
1374 /*-----------------------------------------------------------*/
1375
1376 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
                                                  TaskHandle_t * const pxCreatedTask )
    {
        /* Create an MPU-restricted task using the statically allocated TCB
         * and stack buffers supplied in pxTaskDefinition.  Returns the new
         * TCB, or NULL if either buffer was not provided. */
        TCB_t * pxNewTCB;

        configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
        configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

        if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
        {
            /* The application provided both the TCB and the stack buffer, so
             * nothing is allocated here - the TCB is constructed directly in
             * the supplied StaticTask_t sized buffer. */
            pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
            ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

            /* Store the stack location in the TCB. */
            pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            /* The xRegions member carries the MPU memory region definitions
             * for the restricted task. */
            prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
                                  pxTaskDefinition->pcName,
                                  ( uint32_t ) pxTaskDefinition->usStackDepth,
                                  pxTaskDefinition->pvParameters,
                                  pxTaskDefinition->uxPriority,
                                  pxCreatedTask, pxNewTCB,
                                  pxTaskDefinition->xRegions );
        }
        else
        {
            pxNewTCB = NULL;
        }

        return pxNewTCB;
    }
1419 /*-----------------------------------------------------------*/
1420
1421     BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
1422                                             TaskHandle_t * pxCreatedTask )
1423     {
1424         TCB_t * pxNewTCB;
1425         BaseType_t xReturn;
1426
1427         traceENTER_xTaskCreateRestrictedStatic( pxTaskDefinition, pxCreatedTask );
1428
1429         configASSERT( pxTaskDefinition != NULL );
1430
1431         pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
1432
1433         if( pxNewTCB != NULL )
1434         {
1435             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1436             {
1437                 /* Set the task's affinity before scheduling it. */
1438                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1439             }
1440             #endif
1441
1442             prvAddNewTaskToReadyList( pxNewTCB );
1443             xReturn = pdPASS;
1444         }
1445         else
1446         {
1447             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1448         }
1449
1450         traceRETURN_xTaskCreateRestrictedStatic( xReturn );
1451
1452         return xReturn;
1453     }
1454 /*-----------------------------------------------------------*/
1455
1456     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1457         BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition,
1458                                                            UBaseType_t uxCoreAffinityMask,
1459                                                            TaskHandle_t * pxCreatedTask )
1460         {
1461             TCB_t * pxNewTCB;
1462             BaseType_t xReturn;
1463
1464             traceENTER_xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
1465
1466             configASSERT( pxTaskDefinition != NULL );
1467
1468             pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
1469
1470             if( pxNewTCB != NULL )
1471             {
1472                 /* Set the task's affinity before scheduling it. */
1473                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1474
1475                 prvAddNewTaskToReadyList( pxNewTCB );
1476                 xReturn = pdPASS;
1477             }
1478             else
1479             {
1480                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1481             }
1482
1483             traceRETURN_xTaskCreateRestrictedStaticAffinitySet( xReturn );
1484
1485             return xReturn;
1486         }
1487     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1488
1489 #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
1490 /*-----------------------------------------------------------*/
1491
1492 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
1493     static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
1494                                             TaskHandle_t * const pxCreatedTask )
1495     {
1496         TCB_t * pxNewTCB;
1497
1498         configASSERT( pxTaskDefinition->puxStackBuffer );
1499
1500         if( pxTaskDefinition->puxStackBuffer != NULL )
1501         {
1502             /* MISRA Ref 11.5.1 [Malloc memory assignment] */
1503             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
1504             /* coverity[misra_c_2012_rule_11_5_violation] */
1505             pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
1506
1507             if( pxNewTCB != NULL )
1508             {
1509                 ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1510
1511                 /* Store the stack location in the TCB. */
1512                 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
1513
1514                 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
1515                 {
1516                     /* Tasks can be created statically or dynamically, so note
1517                      * this task had a statically allocated stack in case it is
1518                      * later deleted.  The TCB was allocated dynamically. */
1519                     pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
1520                 }
1521                 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1522
1523                 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
1524                                       pxTaskDefinition->pcName,
1525                                       ( uint32_t ) pxTaskDefinition->usStackDepth,
1526                                       pxTaskDefinition->pvParameters,
1527                                       pxTaskDefinition->uxPriority,
1528                                       pxCreatedTask, pxNewTCB,
1529                                       pxTaskDefinition->xRegions );
1530             }
1531         }
1532         else
1533         {
1534             pxNewTCB = NULL;
1535         }
1536
1537         return pxNewTCB;
1538     }
1539 /*-----------------------------------------------------------*/
1540
1541     BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
1542                                       TaskHandle_t * pxCreatedTask )
1543     {
1544         TCB_t * pxNewTCB;
1545         BaseType_t xReturn;
1546
1547         traceENTER_xTaskCreateRestricted( pxTaskDefinition, pxCreatedTask );
1548
1549         pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
1550
1551         if( pxNewTCB != NULL )
1552         {
1553             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1554             {
1555                 /* Set the task's affinity before scheduling it. */
1556                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1557             }
1558             #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1559
1560             prvAddNewTaskToReadyList( pxNewTCB );
1561
1562             xReturn = pdPASS;
1563         }
1564         else
1565         {
1566             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1567         }
1568
1569         traceRETURN_xTaskCreateRestricted( xReturn );
1570
1571         return xReturn;
1572     }
1573 /*-----------------------------------------------------------*/
1574
1575     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1576         BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition,
1577                                                      UBaseType_t uxCoreAffinityMask,
1578                                                      TaskHandle_t * pxCreatedTask )
1579         {
1580             TCB_t * pxNewTCB;
1581             BaseType_t xReturn;
1582
1583             traceENTER_xTaskCreateRestrictedAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
1584
1585             pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
1586
1587             if( pxNewTCB != NULL )
1588             {
1589                 /* Set the task's affinity before scheduling it. */
1590                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1591
1592                 prvAddNewTaskToReadyList( pxNewTCB );
1593
1594                 xReturn = pdPASS;
1595             }
1596             else
1597             {
1598                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1599             }
1600
1601             traceRETURN_xTaskCreateRestrictedAffinitySet( xReturn );
1602
1603             return xReturn;
1604         }
1605     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1606
1607
1608 #endif /* portUSING_MPU_WRAPPERS */
1609 /*-----------------------------------------------------------*/
1610
1611 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
    static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName,
                                  const configSTACK_DEPTH_TYPE usStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask )
    {
        /* Dynamically allocate both the TCB and the stack for a new task,
         * then initialise the task.  Returns the new TCB, or NULL if either
         * allocation failed (in which case any partial allocation has been
         * freed again). */
        TCB_t * pxNewTCB;

        /* If the stack grows down then allocate the stack then the TCB so the stack
         * does not grow into the TCB.  Likewise if the stack grows up then allocate
         * the TCB then the stack. */
        #if ( portSTACK_GROWTH > 0 )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends on
             * the implementation of the port malloc function and whether or not static
             * allocation is being used. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                /* Allocate space for the stack used by the task being created.
                 * The base of the stack memory stored in the TCB so the task can
                 * be deleted later if required. */
                /* MISRA Ref 11.5.1 [Malloc memory assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) );

                if( pxNewTCB->pxStack == NULL )
                {
                    /* Could not allocate the stack.  Delete the allocated TCB. */
                    vPortFree( pxNewTCB );
                    pxNewTCB = NULL;
                }
            }
        }
        #else /* portSTACK_GROWTH */
        {
            StackType_t * pxStack;

            /* Allocate space for the stack used by the task being created. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) );

            if( pxStack != NULL )
            {
                /* Allocate space for the TCB. */
                /* MISRA Ref 11.5.1 [Malloc memory assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

                if( pxNewTCB != NULL )
                {
                    ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                    /* Store the stack location in the TCB. */
                    pxNewTCB->pxStack = pxStack;
                }
                else
                {
                    /* The stack cannot be used as the TCB was not created.  Free
                     * it again. */
                    vPortFreeStack( pxStack );
                }
            }
            else
            {
                pxNewTCB = NULL;
            }
        }
        #endif /* portSTACK_GROWTH */

        if( pxNewTCB != NULL )
        {
            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created dynamically in case it is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
        }

        return pxNewTCB;
    }
1708 /*-----------------------------------------------------------*/
1709
1710     BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
1711                             const char * const pcName,
1712                             const configSTACK_DEPTH_TYPE usStackDepth,
1713                             void * const pvParameters,
1714                             UBaseType_t uxPriority,
1715                             TaskHandle_t * const pxCreatedTask )
1716     {
1717         TCB_t * pxNewTCB;
1718         BaseType_t xReturn;
1719
1720         traceENTER_xTaskCreate( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1721
1722         pxNewTCB = prvCreateTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1723
1724         if( pxNewTCB != NULL )
1725         {
1726             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1727             {
1728                 /* Set the task's affinity before scheduling it. */
1729                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1730             }
1731             #endif
1732
1733             prvAddNewTaskToReadyList( pxNewTCB );
1734             xReturn = pdPASS;
1735         }
1736         else
1737         {
1738             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1739         }
1740
1741         traceRETURN_xTaskCreate( xReturn );
1742
1743         return xReturn;
1744     }
1745 /*-----------------------------------------------------------*/
1746
1747     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1748         BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode,
1749                                            const char * const pcName,
1750                                            const configSTACK_DEPTH_TYPE usStackDepth,
1751                                            void * const pvParameters,
1752                                            UBaseType_t uxPriority,
1753                                            UBaseType_t uxCoreAffinityMask,
1754                                            TaskHandle_t * const pxCreatedTask )
1755         {
1756             TCB_t * pxNewTCB;
1757             BaseType_t xReturn;
1758
1759             traceENTER_xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, uxCoreAffinityMask, pxCreatedTask );
1760
1761             pxNewTCB = prvCreateTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1762
1763             if( pxNewTCB != NULL )
1764             {
1765                 /* Set the task's affinity before scheduling it. */
1766                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1767
1768                 prvAddNewTaskToReadyList( pxNewTCB );
1769                 xReturn = pdPASS;
1770             }
1771             else
1772             {
1773                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1774             }
1775
1776             traceRETURN_xTaskCreateAffinitySet( xReturn );
1777
1778             return xReturn;
1779         }
1780     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1781
1782 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1783 /*-----------------------------------------------------------*/
1784
1785 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
1786                                   const char * const pcName,
1787                                   const uint32_t ulStackDepth,
1788                                   void * const pvParameters,
1789                                   UBaseType_t uxPriority,
1790                                   TaskHandle_t * const pxCreatedTask,
1791                                   TCB_t * pxNewTCB,
1792                                   const MemoryRegion_t * const xRegions )
1793 {
1794     StackType_t * pxTopOfStack;
1795     UBaseType_t x;
1796
1797     #if ( portUSING_MPU_WRAPPERS == 1 )
1798         /* Should the task be created in privileged mode? */
1799         BaseType_t xRunPrivileged;
1800
1801         if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
1802         {
1803             xRunPrivileged = pdTRUE;
1804         }
1805         else
1806         {
1807             xRunPrivileged = pdFALSE;
1808         }
1809         uxPriority &= ~portPRIVILEGE_BIT;
1810     #endif /* portUSING_MPU_WRAPPERS == 1 */
1811
1812     /* Avoid dependency on memset() if it is not required. */
1813     #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
1814     {
1815         /* Fill the stack with a known value to assist debugging. */
1816         ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
1817     }
1818     #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
1819
1820     /* Calculate the top of stack address.  This depends on whether the stack
1821      * grows from high memory to low (as per the 80x86) or vice versa.
1822      * portSTACK_GROWTH is used to make the result positive or negative as required
1823      * by the port. */
1824     #if ( portSTACK_GROWTH < 0 )
1825     {
1826         pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
1827         pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
1828
1829         /* Check the alignment of the calculated top of stack is correct. */
1830         configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
1831
1832         #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
1833         {
1834             /* Also record the stack's high address, which may assist
1835              * debugging. */
1836             pxNewTCB->pxEndOfStack = pxTopOfStack;
1837         }
1838         #endif /* configRECORD_STACK_HIGH_ADDRESS */
1839     }
1840     #else /* portSTACK_GROWTH */
1841     {
1842         pxTopOfStack = pxNewTCB->pxStack;
1843         pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + portBYTE_ALIGNMENT_MASK ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
1844
1845         /* Check the alignment of the calculated top of stack is correct. */
1846         configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
1847
1848         /* The other extreme of the stack space is required if stack checking is
1849          * performed. */
1850         pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
1851     }
1852     #endif /* portSTACK_GROWTH */
1853
1854     /* Store the task name in the TCB. */
1855     if( pcName != NULL )
1856     {
1857         for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
1858         {
1859             pxNewTCB->pcTaskName[ x ] = pcName[ x ];
1860
1861             /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
1862              * configMAX_TASK_NAME_LEN characters just in case the memory after the
1863              * string is not accessible (extremely unlikely). */
1864             if( pcName[ x ] == ( char ) 0x00 )
1865             {
1866                 break;
1867             }
1868             else
1869             {
1870                 mtCOVERAGE_TEST_MARKER();
1871             }
1872         }
1873
1874         /* Ensure the name string is terminated in the case that the string length
1875          * was greater or equal to configMAX_TASK_NAME_LEN. */
1876         pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1U ] = '\0';
1877     }
1878     else
1879     {
1880         mtCOVERAGE_TEST_MARKER();
1881     }
1882
1883     /* This is used as an array index so must ensure it's not too large. */
1884     configASSERT( uxPriority < configMAX_PRIORITIES );
1885
1886     if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1887     {
1888         uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1889     }
1890     else
1891     {
1892         mtCOVERAGE_TEST_MARKER();
1893     }
1894
1895     pxNewTCB->uxPriority = uxPriority;
1896     #if ( configUSE_MUTEXES == 1 )
1897     {
1898         pxNewTCB->uxBasePriority = uxPriority;
1899     }
1900     #endif /* configUSE_MUTEXES */
1901
1902     vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
1903     vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
1904
1905     /* Set the pxNewTCB as a link back from the ListItem_t.  This is so we can get
1906      * back to  the containing TCB from a generic item in a list. */
1907     listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
1908
1909     /* Event lists are always in priority order. */
1910     listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority );
1911     listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
1912
1913     #if ( portUSING_MPU_WRAPPERS == 1 )
1914     {
1915         vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1916     }
1917     #else
1918     {
1919         /* Avoid compiler warning about unreferenced parameter. */
1920         ( void ) xRegions;
1921     }
1922     #endif
1923
1924     #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
1925     {
1926         /* Allocate and initialize memory for the task's TLS Block. */
1927         configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
1928     }
1929     #endif
1930
1931     /* Initialize the TCB stack to look as if the task was already running,
1932      * but had been interrupted by the scheduler.  The return address is set
1933      * to the start of the task function. Once the stack has been initialised
1934      * the top of stack variable is updated. */
1935     #if ( portUSING_MPU_WRAPPERS == 1 )
1936     {
1937         /* If the port has capability to detect stack overflow,
1938          * pass the stack end address to the stack initialization
1939          * function as well. */
1940         #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1941         {
1942             #if ( portSTACK_GROWTH < 0 )
1943             {
1944                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1945             }
1946             #else /* portSTACK_GROWTH */
1947             {
1948                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1949             }
1950             #endif /* portSTACK_GROWTH */
1951         }
1952         #else /* portHAS_STACK_OVERFLOW_CHECKING */
1953         {
1954             pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1955         }
1956         #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1957     }
1958     #else /* portUSING_MPU_WRAPPERS */
1959     {
1960         /* If the port has capability to detect stack overflow,
1961          * pass the stack end address to the stack initialization
1962          * function as well. */
1963         #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1964         {
1965             #if ( portSTACK_GROWTH < 0 )
1966             {
1967                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1968             }
1969             #else /* portSTACK_GROWTH */
1970             {
1971                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1972             }
1973             #endif /* portSTACK_GROWTH */
1974         }
1975         #else /* portHAS_STACK_OVERFLOW_CHECKING */
1976         {
1977             pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1978         }
1979         #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1980     }
1981     #endif /* portUSING_MPU_WRAPPERS */
1982
1983     /* Initialize task state and task attributes. */
1984     #if ( configNUMBER_OF_CORES > 1 )
1985     {
1986         pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING;
1987
1988         /* Is this an idle task? */
1989         if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvPassiveIdleTask ) )
1990         {
1991             pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE;
1992         }
1993     }
1994     #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
1995
1996     if( pxCreatedTask != NULL )
1997     {
1998         /* Pass the handle out in an anonymous way.  The handle can be used to
1999          * change the created task's priority, delete the created task, etc.*/
2000         *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
2001     }
2002     else
2003     {
2004         mtCOVERAGE_TEST_MARKER();
2005     }
2006 }
2007 /*-----------------------------------------------------------*/
2008
2009 #if ( configNUMBER_OF_CORES == 1 )
2010
    /* Single-core version: place a newly created task on the ready list.
     * Called after the TCB has been fully initialised.  Also elects the new
     * task as pxCurrentTCB when appropriate, and requests a yield if the
     * scheduler is already running. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks++;

            if( pxCurrentTCB == NULL )
            {
                /* There are no other tasks, or all the other tasks are in
                 * the suspended state - make this the current task. */
                pxCurrentTCB = pxNewTCB;

                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required.  We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* If the scheduler is not already running, make this task the
                 * current task if it is the highest priority task to be created
                 * so far. */
                if( xSchedulerRunning == pdFALSE )
                {
                    if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
                    {
                        pxCurrentTCB = pxNewTCB;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* uxTaskNumber is bumped on every create/delete so kernel-aware
             * debuggers can detect that the task lists changed. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            /* Give the port a chance to perform port-specific TCB setup. */
            portSETUP_TCB( pxNewTCB );
        }
        taskEXIT_CRITICAL();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If the created task is of a higher priority than the current task
             * then it should run now. */
            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
2086
2087 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2088
    /* SMP version: place a newly created task on the ready list.  Before the
     * scheduler starts, an idle task is additionally installed as the current
     * task of the first core that does not yet have one.  A yield may be
     * requested if the scheduler is already running. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks++;

            if( xSchedulerRunning == pdFALSE )
            {
                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required.  We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Only idle tasks are pre-assigned to a core before the
                 * scheduler starts. */
                if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                {
                    BaseType_t xCoreID;

                    /* Check if a core is free. */
                    for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
                    {
                        if( pxCurrentTCBs[ xCoreID ] == NULL )
                        {
                            pxNewTCB->xTaskRunState = xCoreID;
                            pxCurrentTCBs[ xCoreID ] = pxNewTCB;
                            break;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* uxTaskNumber is bumped on every create/delete so kernel-aware
             * debuggers can detect that the task lists changed. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            /* Give the port a chance to perform port-specific TCB setup. */
            portSETUP_TCB( pxNewTCB );

            if( xSchedulerRunning != pdFALSE )
            {
                /* If the created task is of a higher priority than another
                 * currently running task and preemption is on then it should
                 * run now. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }
2164
2165 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2166 /*-----------------------------------------------------------*/
2167
2168 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
2169
2170     static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
2171                                                         size_t n )
2172     {
2173         size_t uxCharsWritten;
2174
2175         if( iSnprintfReturnValue < 0 )
2176         {
2177             /* Encoding error - Return 0 to indicate that nothing
2178              * was written to the buffer. */
2179             uxCharsWritten = 0;
2180         }
2181         else if( iSnprintfReturnValue >= ( int ) n )
2182         {
2183             /* This is the case when the supplied buffer is not
2184              * large to hold the generated string. Return the
2185              * number of characters actually written without
2186              * counting the terminating NULL character. */
2187             uxCharsWritten = n - 1U;
2188         }
2189         else
2190         {
2191             /* Complete string was written to the buffer. */
2192             uxCharsWritten = ( size_t ) iSnprintfReturnValue;
2193         }
2194
2195         return uxCharsWritten;
2196     }
2197
2198 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
2199 /*-----------------------------------------------------------*/
2200
2201 #if ( INCLUDE_vTaskDelete == 1 )
2202
    /* Remove a task from the scheduler's management and free its resources.
     * A NULL handle deletes the calling task.  A task that is still running
     * (or scheduled to yield) cannot be freed immediately; it is parked on
     * xTasksWaitingTermination and cleaned up later by the idle task. */
    void vTaskDelete( TaskHandle_t xTaskToDelete )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskDelete( xTaskToDelete );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the calling task that is
             * being deleted. */
            pxTCB = prvGetTCBFromHandle( xTaskToDelete );

            /* Remove task from the ready/delayed list.  A return of zero means
             * the list the task was on is now empty, so the ready-priority
             * tracking must be updated. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Increment the uxTaskNumber also so kernel aware debuggers can
             * detect that the task lists need re-generating.  This is done before
             * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
             * not return. */
            uxTaskNumber++;

            /* If the task is running (or yielding), we must add it to the
             * termination list so that an idle task can delete it when it is
             * no longer running. */
            if( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE )
            {
                /* A running task or a task which is scheduled to yield is being
                 * deleted. This cannot complete when the task is still running
                 * on a core, as a context switch to another task is required.
                 * Place the task in the termination list. The idle task will check
                 * the termination list and free up any memory allocated by the
                 * scheduler for the TCB and stack of the deleted task. */
                vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

                /* Increment the ucTasksDeleted variable so the idle task knows
                 * there is a task that has been deleted and that it should therefore
                 * check the xTasksWaitingTermination list. */
                ++uxDeletedTasksWaitingCleanUp;

                /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
                 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
                traceTASK_DELETE( pxTCB );

                /* The pre-delete hook is primarily for the Windows simulator,
                 * in which Windows specific clean up operations are performed,
                 * after which it is not possible to yield away from this task -
                 * hence xYieldPending is used to latch that a context switch is
                 * required. */
                #if ( configNUMBER_OF_CORES == 1 )
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
                #else
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
                #endif
            }
            else
            {
                /* The task is not running, so it can be accounted for and
                 * cleaned up immediately. */
                --uxCurrentNumberOfTasks;
                traceTASK_DELETE( pxTCB );

                /* Reset the next expected unblock time in case it referred to
                 * the task that has just been deleted. */
                prvResetNextTaskUnblockTime();
            }
        }

        #if ( configNUMBER_OF_CORES == 1 )
        {
            taskEXIT_CRITICAL();

            /* If the task is not deleting itself, call prvDeleteTCB from outside of
             * critical section. If a task deletes itself, prvDeleteTCB is called
             * from prvCheckTasksWaitingTermination which is called from Idle task. */
            if( pxTCB != pxCurrentTCB )
            {
                prvDeleteTCB( pxTCB );
            }

            /* Force a reschedule if it is the currently running task that has just
             * been deleted. */
            if( xSchedulerRunning != pdFALSE )
            {
                if( pxTCB == pxCurrentTCB )
                {
                    configASSERT( uxSchedulerSuspended == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        {
            /* If a running task is not deleting itself, call prvDeleteTCB. If a running
             * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination
             * which is called from Idle task. */
            if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
            {
                prvDeleteTCB( pxTCB );
            }

            /* Force a reschedule if the task that has just been deleted was running. */
            if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
            {
                if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
                {
                    configASSERT( uxSchedulerSuspended == 0 );
                    vTaskYieldWithinAPI();
                }
                else
                {
                    /* The deleted task is running on another core - make that
                     * core reschedule. */
                    prvYieldCore( pxTCB->xTaskRunState );
                }
            }

            taskEXIT_CRITICAL();
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskDelete();
    }
2342
2343 #endif /* INCLUDE_vTaskDelete */
2344 /*-----------------------------------------------------------*/
2345
2346 #if ( INCLUDE_xTaskDelayUntil == 1 )
2347
    /* Delay the calling task until an absolute tick time, producing a fixed
     * execution frequency regardless of how long the task takes to run.
     * *pxPreviousWakeTime holds the time of the last wake and is advanced by
     * xTimeIncrement on every call.  Returns pdTRUE if the task actually
     * delayed, pdFALSE if the wake time had already passed. */
    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                                const TickType_t xTimeIncrement )
    {
        TickType_t xTimeToWake;
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;

        traceENTER_xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );

        configASSERT( pxPreviousWakeTime );
        configASSERT( ( xTimeIncrement > 0U ) );

        vTaskSuspendAll();
        {
            /* Minor optimisation.  The tick count cannot change in this
             * block. */
            const TickType_t xConstTickCount = xTickCount;

            configASSERT( uxSchedulerSuspended == 1U );

            /* Generate the tick time at which the task wants to wake.
             * This addition may intentionally wrap on overflow - the
             * comparisons below account for that. */
            xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

            if( xConstTickCount < *pxPreviousWakeTime )
            {
                /* The tick count has overflowed since this function was
                 * lasted called.  In this case the only time we should ever
                 * actually delay is if the wake time has also  overflowed,
                 * and the wake time is greater than the tick time.  When this
                 * is the case it is as if neither time had overflowed. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The tick time has not overflowed.  In this case we will
                 * delay if either the wake time has overflowed, and/or the
                 * tick time is less than the wake time. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Update the wake time ready for the next call. */
            *pxPreviousWakeTime = xTimeToWake;

            if( xShouldDelay != pdFALSE )
            {
                traceTASK_DELAY_UNTIL( xTimeToWake );

                /* prvAddCurrentTaskToDelayedList() needs the block time, not
                 * the time to wake, so subtract the current tick count. */
                prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so, we may
         * have put ourselves to sleep. */
        if( xAlreadyYielded == pdFALSE )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskDelayUntil( xShouldDelay );

        return xShouldDelay;
    }
2434
2435 #endif /* INCLUDE_xTaskDelayUntil */
2436 /*-----------------------------------------------------------*/
2437
2438 #if ( INCLUDE_vTaskDelay == 1 )
2439
2440     void vTaskDelay( const TickType_t xTicksToDelay )
2441     {
2442         BaseType_t xAlreadyYielded = pdFALSE;
2443
2444         traceENTER_vTaskDelay( xTicksToDelay );
2445
2446         /* A delay time of zero just forces a reschedule. */
2447         if( xTicksToDelay > ( TickType_t ) 0U )
2448         {
2449             vTaskSuspendAll();
2450             {
2451                 configASSERT( uxSchedulerSuspended == 1U );
2452
2453                 traceTASK_DELAY();
2454
2455                 /* A task that is removed from the event list while the
2456                  * scheduler is suspended will not get placed in the ready
2457                  * list or removed from the blocked list until the scheduler
2458                  * is resumed.
2459                  *
2460                  * This task cannot be in an event list as it is the currently
2461                  * executing task. */
2462                 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
2463             }
2464             xAlreadyYielded = xTaskResumeAll();
2465         }
2466         else
2467         {
2468             mtCOVERAGE_TEST_MARKER();
2469         }
2470
2471         /* Force a reschedule if xTaskResumeAll has not already done so, we may
2472          * have put ourselves to sleep. */
2473         if( xAlreadyYielded == pdFALSE )
2474         {
2475             taskYIELD_WITHIN_API();
2476         }
2477         else
2478         {
2479             mtCOVERAGE_TEST_MARKER();
2480         }
2481
2482         traceRETURN_vTaskDelay();
2483     }
2484
2485 #endif /* INCLUDE_vTaskDelay */
2486 /*-----------------------------------------------------------*/
2487
2488 #if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
2489
    /* Determine the scheduler state of xTask (Running, Ready, Blocked,
     * Suspended or Deleted) from the lists its state and event list items are
     * referenced from.  NOTE: the else-if chain below is deliberately threaded
     * through the #if blocks so that each optional clause extends the same
     * chain - do not reorder the preprocessor sections. */
    eTaskState eTaskGetState( TaskHandle_t xTask )
    {
        eTaskState eReturn;
        List_t const * pxStateList;
        List_t const * pxEventList;
        List_t const * pxDelayedList;
        List_t const * pxOverflowedDelayedList;
        const TCB_t * const pxTCB = xTask;

        traceENTER_eTaskGetState( xTask );

        configASSERT( pxTCB );

        #if ( configNUMBER_OF_CORES == 1 )
            if( pxTCB == pxCurrentTCB )
            {
                /* The task calling this function is querying its own state. */
                eReturn = eRunning;
            }
            else
        #endif
        {
            /* Take a consistent snapshot of the list pointers inside a
             * critical section, then classify outside of it. */
            taskENTER_CRITICAL();
            {
                pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
                pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
                pxDelayedList = pxDelayedTaskList;
                pxOverflowedDelayedList = pxOverflowDelayedTaskList;
            }
            taskEXIT_CRITICAL();

            if( pxEventList == &xPendingReadyList )
            {
                /* The task has been placed on the pending ready list, so its
                 * state is eReady regardless of what list the task's state list
                 * item is currently placed on. */
                eReturn = eReady;
            }
            else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
            {
                /* The task being queried is referenced from one of the Blocked
                 * lists. */
                eReturn = eBlocked;
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
                else if( pxStateList == &xSuspendedTaskList )
                {
                    /* The task being queried is referenced from the suspended
                     * list.  Is it genuinely suspended or is it blocked
                     * indefinitely? */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                    {
                        #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                        {
                            BaseType_t x;

                            /* The task does not appear on the event list item of
                             * and of the RTOS objects, but could still be in the
                             * blocked state if it is waiting on its notification
                             * rather than waiting on an object.  If not, is
                             * suspended. */
                            eReturn = eSuspended;

                            for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                            {
                                if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                {
                                    eReturn = eBlocked;
                                    break;
                                }
                            }
                        }
                        #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        {
                            eReturn = eSuspended;
                        }
                        #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    }
                    else
                    {
                        eReturn = eBlocked;
                    }
                }
            #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

            #if ( INCLUDE_vTaskDelete == 1 )
                else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
                {
                    /* The task being queried is referenced from the deleted
                     * tasks list, or it is not referenced from any lists at
                     * all. */
                    eReturn = eDeleted;
                }
            #endif

            else
            {
                #if ( configNUMBER_OF_CORES == 1 )
                {
                    /* If the task is not in any other state, it must be in the
                     * Ready (including pending ready) state. */
                    eReturn = eReady;
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                    {
                        /* Is it actively running on a core? */
                        eReturn = eRunning;
                    }
                    else
                    {
                        /* If the task is not in any other state, it must be in the
                         * Ready (including pending ready) state. */
                        eReturn = eReady;
                    }
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }

        traceRETURN_eTaskGetState( eReturn );

        return eReturn;
    }
2616
2617 #endif /* INCLUDE_eTaskGetState */
2618 /*-----------------------------------------------------------*/
2619
2620 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2621
2622     UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
2623     {
2624         TCB_t const * pxTCB;
2625         UBaseType_t uxReturn;
2626
2627         traceENTER_uxTaskPriorityGet( xTask );
2628
2629         taskENTER_CRITICAL();
2630         {
2631             /* If null is passed in here then it is the priority of the task
2632              * that called uxTaskPriorityGet() that is being queried. */
2633             pxTCB = prvGetTCBFromHandle( xTask );
2634             uxReturn = pxTCB->uxPriority;
2635         }
2636         taskEXIT_CRITICAL();
2637
2638         traceRETURN_uxTaskPriorityGet( uxReturn );
2639
2640         return uxReturn;
2641     }
2642
2643 #endif /* INCLUDE_uxTaskPriorityGet */
2644 /*-----------------------------------------------------------*/
2645
2646 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2647
2648     UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
2649     {
2650         TCB_t const * pxTCB;
2651         UBaseType_t uxReturn;
2652         UBaseType_t uxSavedInterruptStatus;
2653
2654         traceENTER_uxTaskPriorityGetFromISR( xTask );
2655
2656         /* RTOS ports that support interrupt nesting have the concept of a
2657          * maximum  system call (or maximum API call) interrupt priority.
2658          * Interrupts that are  above the maximum system call priority are keep
2659          * permanently enabled, even when the RTOS kernel is in a critical section,
2660          * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
2661          * is defined in FreeRTOSConfig.h then
2662          * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2663          * failure if a FreeRTOS API function is called from an interrupt that has
2664          * been assigned a priority above the configured maximum system call
2665          * priority.  Only FreeRTOS functions that end in FromISR can be called
2666          * from interrupts  that have been assigned a priority at or (logically)
2667          * below the maximum system call interrupt priority.  FreeRTOS maintains a
2668          * separate interrupt safe API to ensure interrupt entry is as fast and as
2669          * simple as possible.  More information (albeit Cortex-M specific) is
2670          * provided on the following link:
2671          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2672         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2673
2674         uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2675         {
2676             /* If null is passed in here then it is the priority of the calling
2677              * task that is being queried. */
2678             pxTCB = prvGetTCBFromHandle( xTask );
2679             uxReturn = pxTCB->uxPriority;
2680         }
2681         taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2682
2683         traceRETURN_uxTaskPriorityGetFromISR( uxReturn );
2684
2685         return uxReturn;
2686     }
2687
2688 #endif /* INCLUDE_uxTaskPriorityGet */
2689 /*-----------------------------------------------------------*/
2690
2691 #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
2692
2693     UBaseType_t uxTaskBasePriorityGet( const TaskHandle_t xTask )
2694     {
2695         TCB_t const * pxTCB;
2696         UBaseType_t uxReturn;
2697
2698         traceENTER_uxTaskBasePriorityGet( xTask );
2699
2700         taskENTER_CRITICAL();
2701         {
2702             /* If null is passed in here then it is the base priority of the task
2703              * that called uxTaskBasePriorityGet() that is being queried. */
2704             pxTCB = prvGetTCBFromHandle( xTask );
2705             uxReturn = pxTCB->uxBasePriority;
2706         }
2707         taskEXIT_CRITICAL();
2708
2709         traceRETURN_uxTaskBasePriorityGet( uxReturn );
2710
2711         return uxReturn;
2712     }
2713
2714 #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
2715 /*-----------------------------------------------------------*/
2716
2717 #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
2718
2719     UBaseType_t uxTaskBasePriorityGetFromISR( const TaskHandle_t xTask )
2720     {
2721         TCB_t const * pxTCB;
2722         UBaseType_t uxReturn;
2723         UBaseType_t uxSavedInterruptStatus;
2724
2725         traceENTER_uxTaskBasePriorityGetFromISR( xTask );
2726
2727         /* RTOS ports that support interrupt nesting have the concept of a
2728          * maximum  system call (or maximum API call) interrupt priority.
2729          * Interrupts that are  above the maximum system call priority are keep
2730          * permanently enabled, even when the RTOS kernel is in a critical section,
2731          * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
2732          * is defined in FreeRTOSConfig.h then
2733          * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2734          * failure if a FreeRTOS API function is called from an interrupt that has
2735          * been assigned a priority above the configured maximum system call
2736          * priority.  Only FreeRTOS functions that end in FromISR can be called
2737          * from interrupts  that have been assigned a priority at or (logically)
2738          * below the maximum system call interrupt priority.  FreeRTOS maintains a
2739          * separate interrupt safe API to ensure interrupt entry is as fast and as
2740          * simple as possible.  More information (albeit Cortex-M specific) is
2741          * provided on the following link:
2742          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2743         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2744
2745         uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2746         {
2747             /* If null is passed in here then it is the base priority of the calling
2748              * task that is being queried. */
2749             pxTCB = prvGetTCBFromHandle( xTask );
2750             uxReturn = pxTCB->uxBasePriority;
2751         }
2752         taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2753
2754         traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn );
2755
2756         return uxReturn;
2757     }
2758
2759 #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
2760 /*-----------------------------------------------------------*/
2761
2762 #if ( INCLUDE_vTaskPrioritySet == 1 )
2763
    /* Change the priority of a task.  A NULL handle changes the priority of
     * the calling task.  The change may ready a task of higher priority than
     * a running task, in which case a yield is requested before returning. */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        #if ( configNUMBER_OF_CORES > 1 )
            BaseType_t xYieldForTask = pdFALSE;
        #endif

        traceENTER_vTaskPrioritySet( xTask, uxNewPriority );

        configASSERT( uxNewPriority < configMAX_PRIORITIES );

        /* Ensure the new priority is valid.  Out-of-range values are clamped
         * to the highest valid priority rather than rejected. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            /* When mutexes are used the task may currently be running at an
             * inherited priority, so compare against the base priority the
             * application last assigned. */
            #if ( configUSE_MUTEXES == 1 )
            {
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than a running task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        if( pxTCB != pxCurrentTCB )
                        {
                            /* The priority of a task other than the currently
                             * running task is being raised.  Is the priority being
                             * raised above that of the running task? */
                            if( uxNewPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldRequired = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* The priority of the running task is being raised,
                             * but the running task must already be the highest
                             * priority task able to run so no yield is required. */
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* The priority of a task is being raised so
                         * perform a yield for this task later. */
                        xYieldForTask = pdTRUE;
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    /* Setting the priority of a running task down means
                     * there may now be another task of higher priority that
                     * is ready to execute. */
                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                        if( pxTCB->xPreemptionDisable == pdFALSE )
                    #endif
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority or the new priority
                     * is bigger than the inherited priority. */
                    if( ( pxTCB->uxBasePriority == pxTCB->uxPriority ) || ( uxNewPriority > pxTCB->uxPriority ) )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list.  As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #else
                    {
                        /* It's possible that xYieldForTask was already set to pdTRUE because
                         * its priority is being raised. However, since it is not in a ready list
                         * we don't actually need to yield for it. */
                        xYieldForTask = pdFALSE;
                    }
                    #endif
                }

                if( xYieldRequired != pdFALSE )
                {
                    /* The running task priority is set down. Request the task to yield. */
                    taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES > 1 )
                        if( xYieldForTask != pdFALSE )
                        {
                            /* The priority of the task is being raised. If a running
                             * task has priority lower than this task, it should yield
                             * for this task. */
                            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                        }
                        else
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskPrioritySet();
    }
2972
2973 #endif /* INCLUDE_vTaskPrioritySet */
2974 /*-----------------------------------------------------------*/
2975
2976 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
    /* Set the mask of cores on which a task is allowed to run (SMP builds
     * only).  A NULL handle targets the calling task.  If the task is running
     * on a core the new mask excludes, that core is asked to yield. */
    void vTaskCoreAffinitySet( const TaskHandle_t xTask,
                               UBaseType_t uxCoreAffinityMask )
    {
        TCB_t * pxTCB;
        BaseType_t xCoreID;
        UBaseType_t uxPrevCoreAffinityMask;

        #if ( configUSE_PREEMPTION == 1 )
            UBaseType_t uxPrevNotAllowedCores;
        #endif

        traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );

        taskENTER_CRITICAL();
        {
            pxTCB = prvGetTCBFromHandle( xTask );

            /* Keep the previous mask so newly-permitted cores can be
             * identified below. */
            uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask;
            pxTCB->uxCoreAffinityMask = uxCoreAffinityMask;

            if( xSchedulerRunning != pdFALSE )
            {
                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;

                    /* If the task can no longer run on the core it was running,
                     * request the core to yield. */
                    if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U )
                    {
                        prvYieldCore( xCoreID );
                    }
                }
                else
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Calculate the cores on which this task was not allowed to
                         * run previously. */
                        uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                        /* Does the new core mask enable this task to run on any of
                         * the previously not allowed cores? If yes, check if this
                         * task can be scheduled on any of those cores. */
                        if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U )
                        {
                            prvYieldForTask( pxTCB );
                        }
                    }
                    #else /* #if( configUSE_PREEMPTION == 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #endif /* #if( configUSE_PREEMPTION == 1 ) */
                }
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskCoreAffinitySet();
    }
3038 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
3039 /*-----------------------------------------------------------*/
3040
3041 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
3042     UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask )
3043     {
3044         const TCB_t * pxTCB;
3045         UBaseType_t uxCoreAffinityMask;
3046
3047         traceENTER_vTaskCoreAffinityGet( xTask );
3048
3049         taskENTER_CRITICAL();
3050         {
3051             pxTCB = prvGetTCBFromHandle( xTask );
3052             uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
3053         }
3054         taskEXIT_CRITICAL();
3055
3056         traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
3057
3058         return uxCoreAffinityMask;
3059     }
3060 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
3061
3062 /*-----------------------------------------------------------*/
3063
3064 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
3065
3066     void vTaskPreemptionDisable( const TaskHandle_t xTask )
3067     {
3068         TCB_t * pxTCB;
3069
3070         traceENTER_vTaskPreemptionDisable( xTask );
3071
3072         taskENTER_CRITICAL();
3073         {
3074             pxTCB = prvGetTCBFromHandle( xTask );
3075
3076             pxTCB->xPreemptionDisable = pdTRUE;
3077         }
3078         taskEXIT_CRITICAL();
3079
3080         traceRETURN_vTaskPreemptionDisable();
3081     }
3082
3083 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
3084 /*-----------------------------------------------------------*/
3085
3086 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
3087
3088     void vTaskPreemptionEnable( const TaskHandle_t xTask )
3089     {
3090         TCB_t * pxTCB;
3091         BaseType_t xCoreID;
3092
3093         traceENTER_vTaskPreemptionEnable( xTask );
3094
3095         taskENTER_CRITICAL();
3096         {
3097             pxTCB = prvGetTCBFromHandle( xTask );
3098
3099             pxTCB->xPreemptionDisable = pdFALSE;
3100
3101             if( xSchedulerRunning != pdFALSE )
3102             {
3103                 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
3104                 {
3105                     xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
3106                     prvYieldCore( xCoreID );
3107                 }
3108             }
3109         }
3110         taskEXIT_CRITICAL();
3111
3112         traceRETURN_vTaskPreemptionEnable();
3113     }
3114
3115 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
3116 /*-----------------------------------------------------------*/
3117
3118 #if ( INCLUDE_vTaskSuspend == 1 )
3119
    /* Place a task in the Suspended state.  A NULL handle suspends the
     * calling task.  The task is removed from any ready/delayed list and any
     * event list it is waiting on, then appended to xSuspendedTaskList. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;

        #if ( configNUMBER_OF_CORES > 1 )
            BaseType_t xTaskRunningOnCore;
        #endif

        traceENTER_vTaskSuspend( xTaskToSuspend );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            /* Capture the core the task is running on before it is moved to
             * the suspended list, so the right core can be yielded below. */
            #if ( configNUMBER_OF_CORES > 1 )
                xTaskRunningOnCore = pxTCB->xTaskRunState;
            #endif

            /* Remove task from the ready/delayed list and place in the
             * suspended list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
        }

        /* NOTE: in the single-core build the critical section is exited here
         * and re-entered below; in the SMP build it extends to the end of the
         * function. */
        #if ( configNUMBER_OF_CORES == 1 )
        {
            taskEXIT_CRITICAL();

            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                taskENTER_CRITICAL();
                {
                    prvResetNextTaskUnblockTime();
                }
                taskEXIT_CRITICAL();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( pxTCB == pxCurrentTCB )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    /* The current task has just been suspended. */
                    configASSERT( uxSchedulerSuspended == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    /* The scheduler is not running, but the task that was pointed
                     * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                     * must be adjusted to point to a different task. */
                    if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
                    {
                        /* No other tasks are ready, so set pxCurrentTCB back to
                         * NULL so when the next task is created pxCurrentTCB will
                         * be set to point to it no matter what its relative priority
                         * is. */
                        pxCurrentTCB = NULL;
                    }
                    else
                    {
                        vTaskSwitchContext();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        {
            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                prvResetNextTaskUnblockTime();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    if( xTaskRunningOnCore == ( BaseType_t ) portGET_CORE_ID() )
                    {
                        /* The current task has just been suspended. */
                        configASSERT( uxSchedulerSuspended == 0 );
                        vTaskYieldWithinAPI();
                    }
                    else
                    {
                        prvYieldCore( xTaskRunningOnCore );
                    }
                }
                else
                {
                    /* This code path is not possible because only Idle tasks are
                     * assigned a core before the scheduler is started ( i.e.
                     * taskTASK_IS_RUNNING is only true for idle tasks before
                     * the scheduler is started ) and idle tasks cannot be
                     * suspended. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            taskEXIT_CRITICAL();
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskSuspend();
    }
3282
3283 #endif /* INCLUDE_vTaskSuspend */
3284 /*-----------------------------------------------------------*/
3285
3286 #if ( INCLUDE_vTaskSuspend == 1 )
3287
    /* Return pdTRUE only if xTask is genuinely in the Suspended state -
     * i.e. in xSuspendedTaskList, not pending resumption from an ISR, not on
     * any event list, and (when notifications are enabled) not blocked
     * waiting on a task notification.  Tasks blocked indefinitely also live
     * in the suspended list, so those extra checks are required. */
    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
    {
        BaseType_t xReturn = pdFALSE;
        const TCB_t * const pxTCB = xTask;

        /* Accesses xPendingReadyList so must be called from a critical
         * section. */

        /* It does not make sense to check if the calling task is suspended. */
        configASSERT( xTask );

        /* Is the task being resumed actually in the suspended list? */
        if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
        {
            /* Has the task already been resumed from within an ISR? */
            if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
            {
                /* Is it in the suspended list because it is in the Suspended
                 * state, or because it is blocked with no timeout? */
                if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
                {
                    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                    {
                        BaseType_t x;

                        /* The task does not appear on the event list item of
                         * any of the RTOS objects, but could still be in the
                         * blocked state if it is waiting on its notification
                         * rather than waiting on an object.  If not, it is
                         * suspended. */
                        xReturn = pdTRUE;

                        for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                        {
                            if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                            {
                                xReturn = pdFALSE;
                                break;
                            }
                        }
                    }
                    #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    {
                        xReturn = pdTRUE;
                    }
                    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }
3352
3353 #endif /* INCLUDE_vTaskSuspend */
3354 /*-----------------------------------------------------------*/
3355
3356 #if ( INCLUDE_vTaskSuspend == 1 )
3357
    /* Move a task out of the Suspended state and back onto a ready list.
     * Only acts if prvTaskIsTaskSuspended() confirms the task really is
     * suspended; a yield is then requested so a higher-priority resumed task
     * can run. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        traceENTER_vTaskResume( xTaskToResume );

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        #if ( configNUMBER_OF_CORES == 1 )

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. */
            if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
        #else

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. It is also impossible to resume a task
             * that is actively running on another core but it is not safe
             * to check their run state here. Therefore, we get into a critical
             * section and check if the task is actually suspended or not. */
            if( pxTCB != NULL )
        #endif
        {
            taskENTER_CRITICAL();
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* This yield may not cause the task just resumed to run,
                     * but will leave the lists in the correct state for the
                     * next yield. */
                    taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskResume();
    }
3412
3413 #endif /* INCLUDE_vTaskSuspend */
3414
3415 /*-----------------------------------------------------------*/
3416
#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )

    /* Interrupt safe version of vTaskResume().  Moves a task that was
     * previously suspended with vTaskSuspend() back towards the Ready state.
     * Returns pdTRUE if resuming the task should result in a context switch,
     * in which case the ISR should request a yield (e.g. via
     * portYIELD_FROM_ISR()) before it exits. */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskResumeFromISR( xTaskToResume );

        configASSERT( xTaskToResume );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Only act if the task is genuinely in the Suspended state - it
             * may already have been resumed since it was suspended. */
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Ready lists can be accessed so move the task from the
                         * suspended list to the ready list directly. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            xYieldRequired = pdTRUE;

                            /* Mark that a yield is pending in case the user is not
                             * using the return value to initiate a context switch
                             * from the ISR using portYIELD_FROM_ISR. */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) )
                {
                    /* In SMP builds let the scheduler decide which core (if
                     * any) should yield for the newly readied task. */
                    prvYieldForTask( pxTCB );

                    if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskResumeFromISR( xYieldRequired );

        return xYieldRequired;
    }

#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
3511 /*-----------------------------------------------------------*/
3512
/* Create the idle task - and, in SMP builds, one additional passive idle
 * task per extra core - at the lowest priority.  Returns pdPASS if every
 * idle task was created successfully, otherwise pdFAIL. */
static BaseType_t prvCreateIdleTasks( void )
{
    BaseType_t xReturn = pdPASS;
    BaseType_t xCoreID;
    char cIdleName[ configMAX_TASK_NAME_LEN ];
    TaskFunction_t pxIdleTaskFunction = NULL;
    BaseType_t xIdleTaskNameIndex;

    /* Copy the configured idle task name into a local buffer, stopping at
     * the NUL terminator or at configMAX_TASK_NAME_LEN characters,
     * whichever comes first. */
    for( xIdleTaskNameIndex = ( BaseType_t ) 0; xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN; xIdleTaskNameIndex++ )
    {
        cIdleName[ xIdleTaskNameIndex ] = configIDLE_TASK_NAME[ xIdleTaskNameIndex ];

        /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
         * configMAX_TASK_NAME_LEN characters just in case the memory after the
         * string is not accessible (extremely unlikely). */
        if( cIdleName[ xIdleTaskNameIndex ] == ( char ) 0x00 )
        {
            break;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    /* Add each idle task at the lowest priority. */
    for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
    {
        #if ( configNUMBER_OF_CORES == 1 )
        {
            pxIdleTaskFunction = prvIdleTask;
        }
        #else /* #if (  configNUMBER_OF_CORES == 1 ) */
        {
            /* In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks
             * are also created to ensure that each core has an idle task to
             * run when no other task is available to run. */
            if( xCoreID == 0 )
            {
                pxIdleTaskFunction = prvIdleTask;
            }
            else
            {
                pxIdleTaskFunction = prvPassiveIdleTask;
            }
        }
        #endif /* #if (  configNUMBER_OF_CORES == 1 ) */

        /* Update the idle task name with suffix to differentiate the idle tasks.
         * This function is not required in single core FreeRTOS since there is
         * only one idle task. */
        #if ( configNUMBER_OF_CORES > 1 )
        {
            /* Append the idle task number to the end of the name if there is space.
             * Note: xIdleTaskNameIndex still holds the position reached by the
             * copy loop above - i.e. the index of the NUL terminator, or
             * configMAX_TASK_NAME_LEN if the name filled the buffer. */
            if( xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN )
            {
                cIdleName[ xIdleTaskNameIndex ] = ( char ) ( xCoreID + '0' );

                /* And append a null character if there is space. */
                if( ( xIdleTaskNameIndex + 1 ) < ( BaseType_t ) configMAX_TASK_NAME_LEN )
                {
                    cIdleName[ xIdleTaskNameIndex + 1 ] = '\0';
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* if ( configNUMBER_OF_CORES > 1 ) */

        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        {
            StaticTask_t * pxIdleTaskTCBBuffer = NULL;
            StackType_t * pxIdleTaskStackBuffer = NULL;
            uint32_t ulIdleTaskStackSize;

            /* The Idle task is created using user provided RAM - obtain the
             * address of the RAM then create the idle task. */
            #if ( configNUMBER_OF_CORES == 1 )
            {
                vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
            }
            #else
            {
                if( xCoreID == 0 )
                {
                    vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
                }
                else
                {
                    vApplicationGetPassiveIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize, xCoreID - 1 );
                }
            }
            #endif /* if ( configNUMBER_OF_CORES == 1 ) */
            xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( pxIdleTaskFunction,
                                                             cIdleName,
                                                             ulIdleTaskStackSize,
                                                             ( void * ) NULL,
                                                             portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                                             pxIdleTaskStackBuffer,
                                                             pxIdleTaskTCBBuffer );

            if( xIdleTaskHandles[ xCoreID ] != NULL )
            {
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
        {
            /* The Idle task is being created using dynamically allocated RAM. */
            xReturn = xTaskCreate( pxIdleTaskFunction,
                                   cIdleName,
                                   configMINIMAL_STACK_SIZE,
                                   ( void * ) NULL,
                                   portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                   &xIdleTaskHandles[ xCoreID ] );
        }
        #endif /* configSUPPORT_STATIC_ALLOCATION */

        /* Break the loop if any of the idle task is failed to be created. */
        if( xReturn == pdFAIL )
        {
            break;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    return xReturn;
}
3654
3655 /*-----------------------------------------------------------*/
3656
/* Start the RTOS scheduler: create the idle task(s) (and the timer service
 * task when configUSE_TIMERS is 1), disable interrupts, then hand control to
 * the port layer via xPortStartScheduler().  If the scheduler starts
 * successfully this function does not return. */
void vTaskStartScheduler( void )
{
    BaseType_t xReturn;

    traceENTER_vTaskStartScheduler();

    #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
    {
        /* Sanity check that the UBaseType_t type has at least as many bits
         * as configNUMBER_OF_CORES. */
        configASSERT( ( sizeof( UBaseType_t ) * taskBITS_PER_BYTE ) >= configNUMBER_OF_CORES );
    }
    #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) */

    xReturn = prvCreateIdleTasks();

    #if ( configUSE_TIMERS == 1 )
    {
        if( xReturn == pdPASS )
        {
            xReturn = xTimerCreateTimerTask();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_TIMERS */

    if( xReturn == pdPASS )
    {
        /* freertos_tasks_c_additions_init() should only be called if the user
         * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
         * the only macro called by the function. */
        #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        {
            freertos_tasks_c_additions_init();
        }
        #endif

        /* Interrupts are turned off here, to ensure a tick does not occur
         * before or during the call to xPortStartScheduler().  The stacks of
         * the created tasks contain a status word with interrupts switched on
         * so interrupts will automatically get re-enabled when the first task
         * starts to run. */
        portDISABLE_INTERRUPTS();

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Switch C-Runtime's TLS Block to point to the TLS
             * block specific to the task that will run first. */
            configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
        }
        #endif

        xNextTaskUnblockTime = portMAX_DELAY;
        xSchedulerRunning = pdTRUE;
        xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

        /* If configGENERATE_RUN_TIME_STATS is defined then the following
         * macro must be defined to configure the timer/counter used to generate
         * the run time counter time base.   NOTE:  If configGENERATE_RUN_TIME_STATS
         * is set to 0 and the following line fails to build then ensure you do not
         * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
         * FreeRTOSConfig.h file. */
        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

        traceTASK_SWITCHED_IN();

        /* Setting up the timer tick is hardware specific and thus in the
         * portable interface. */

        /* The return value for xPortStartScheduler is not required
         * hence using a void datatype. */
        ( void ) xPortStartScheduler();

        /* In most cases, xPortStartScheduler() will not return. If it
         * returns pdTRUE then there was not enough heap memory available
         * to create either the Idle or the Timer task. If it returned
         * pdFALSE, then the application called xTaskEndScheduler().
         * Most ports don't implement xTaskEndScheduler() as there is
         * nothing to return to. */
    }
    else
    {
        /* This line will only be reached if the kernel could not be started,
         * because there was not enough FreeRTOS heap to create the idle task
         * or the timer task. */
        configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
    }

    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
     * meaning xIdleTaskHandles are not used anywhere else. */
    ( void ) xIdleTaskHandles;

    /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
     * from getting optimized out as it is no longer used by the kernel. */
    ( void ) uxTopUsedPriority;

    traceRETURN_vTaskStartScheduler();
}
3758 /*-----------------------------------------------------------*/
3759
/* Stop the scheduler.  Interrupts are disabled before xSchedulerRunning is
 * cleared and the port-specific vPortEndScheduler() is called - the three
 * steps must happen in this order so no tick can be processed while the
 * scheduler state is being torn down.  Only meaningful on ports that
 * implement vPortEndScheduler() (most embedded ports have nothing to
 * return to). */
void vTaskEndScheduler( void )
{
    traceENTER_vTaskEndScheduler();

    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary.  The port
     * layer must ensure interrupts enable  bit is left in the correct state. */
    portDISABLE_INTERRUPTS();
    xSchedulerRunning = pdFALSE;
    vPortEndScheduler();

    traceRETURN_vTaskEndScheduler();
}
3773 /*----------------------------------------------------------*/
3774
/* Suspend the scheduler: prevents a context switch from occurring, but
 * leaves interrupts enabled.  Calls may nest - each call must be matched by
 * a call to xTaskResumeAll(). */
void vTaskSuspendAll( void )
{
    traceENTER_vTaskSuspendAll();

    #if ( configNUMBER_OF_CORES == 1 )
    {
        /* A critical section is not required as the variable is of type
         * BaseType_t.  Please read Richard Barry's reply in the following link to a
         * post in the FreeRTOS support forum before reporting this as a bug! -
         * https://goo.gl/wu4acr */

        /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
         * do not otherwise exhibit real time behaviour. */
        portSOFTWARE_BARRIER();

        /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
         * is used to allow calls to vTaskSuspendAll() to nest. */
        ++uxSchedulerSuspended;

        /* Enforces ordering for ports and optimised compilers that may otherwise place
         * the above increment elsewhere. */
        portMEMORY_BARRIER();
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        UBaseType_t ulState;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
             * We must disable interrupts before we grab the locks in the event that this task is
             * interrupted and switches context before incrementing uxSchedulerSuspended.
             * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
             * uxSchedulerSuspended since that will prevent context switches. */
            ulState = portSET_INTERRUPT_MASK();

            /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
             * do not otherwise exhibit real time behaviour. */
            portSOFTWARE_BARRIER();

            portGET_TASK_LOCK();

            /* uxSchedulerSuspended is incremented after prvCheckForRunStateChange() is
             * called.  This prevents the variable being altered while FromISR APIs may
             * be readying tasks. */
            if( uxSchedulerSuspended == 0U )
            {
                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    prvCheckForRunStateChange();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            portGET_ISR_LOCK();

            /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
             * is used to allow calls to vTaskSuspendAll() to nest. */
            ++uxSchedulerSuspended;
            portRELEASE_ISR_LOCK();

            portCLEAR_INTERRUPT_MASK( ulState );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskSuspendAll();
}
3857
3858 /*----------------------------------------------------------*/
3859
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Return the number of ticks until the next task is due to leave the
     * Blocked state (xNextTaskUnblockTime - xTickCount), or 0 if the next
     * tick interrupt must not be suppressed because another task of equal or
     * higher priority is ready to run.  Used by the tickless idle
     * implementation to decide how long the tick can be stopped for. */
    static TickType_t prvGetExpectedIdleTime( void )
    {
        TickType_t xReturn;
        UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;

        /* uxHigherPriorityReadyTasks takes care of the case where
         * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
         * task that are in the Ready state, even though the idle task is
         * running. */
        #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
        {
            if( uxTopReadyPriority > tskIDLE_PRIORITY )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #else
        {
            const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;

            /* When port optimised task selection is used the uxTopReadyPriority
             * variable is used as a bit map.  If bits other than the least
             * significant bit are set then there are tasks that have a priority
             * above the idle priority that are in the Ready state.  This takes
             * care of the case where the co-operative scheduler is in use. */
            if( uxTopReadyPriority > uxLeastSignificantBit )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */

        if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
        {
            xReturn = 0;
        }
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1U )
        {
            /* There are other idle priority tasks in the ready state.  If
             * time slicing is used then the very next tick interrupt must be
             * processed. */
            xReturn = 0;
        }
        else if( uxHigherPriorityReadyTasks != pdFALSE )
        {
            /* There are tasks in the Ready state that have a priority above the
             * idle priority.  This path can only be reached if
             * configUSE_PREEMPTION is 0. */
            xReturn = 0;
        }
        else
        {
            xReturn = xNextTaskUnblockTime;
            xReturn -= xTickCount;
        }

        return xReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
3922 /*----------------------------------------------------------*/
3923
/* Resume scheduler activity after a call to vTaskSuspendAll().  Once the
 * suspension nesting count reaches zero: moves any tasks that were readied
 * while the scheduler was suspended from the pending ready list to the ready
 * lists, processes any ticks that were pended during the suspension, and
 * performs any yield that became pending.  Returns pdTRUE if a pending yield
 * was performed inside this function (so the caller need not yield again),
 * otherwise pdFALSE. */
BaseType_t xTaskResumeAll( void )
{
    TCB_t * pxTCB = NULL;
    BaseType_t xAlreadyYielded = pdFALSE;

    traceENTER_xTaskResumeAll();

    #if ( configNUMBER_OF_CORES > 1 )
        if( xSchedulerRunning != pdFALSE )
    #endif
    {
        /* It is possible that an ISR caused a task to be removed from an event
         * list while the scheduler was suspended.  If this was the case then the
         * removed task will have been added to the xPendingReadyList.  Once the
         * scheduler has been resumed it is safe to move all the pending ready
         * tasks from this list into their appropriate ready list. */
        taskENTER_CRITICAL();
        {
            BaseType_t xCoreID;
            xCoreID = ( BaseType_t ) portGET_CORE_ID();

            /* If uxSchedulerSuspended is zero then this function does not match a
             * previous call to vTaskSuspendAll(). */
            configASSERT( uxSchedulerSuspended != 0U );

            --uxSchedulerSuspended;
            portRELEASE_TASK_LOCK();

            if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
            {
                if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
                {
                    /* Move any readied tasks from the pending list into the
                     * appropriate ready list. */
                    while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
                        listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                        portMEMORY_BARRIER();
                        listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                        prvAddTaskToReadyList( pxTCB );

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            /* If the moved task has a priority higher than the current
                             * task then a yield must be performed. */
                            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldPendings[ xCoreID ] = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                        {
                            /* All appropriate tasks yield at the moment a task is added to xPendingReadyList.
                             * If the current core yielded then vTaskSwitchContext() has already been called
                             * which sets xYieldPendings for the current core to pdTRUE. */
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }

                    if( pxTCB != NULL )
                    {
                        /* A task was unblocked while the scheduler was suspended,
                         * which may have prevented the next unblock time from being
                         * re-calculated, in which case re-calculate it now.  Mainly
                         * important for low power tickless implementations, where
                         * this can prevent an unnecessary exit from low power
                         * state. */
                        prvResetNextTaskUnblockTime();
                    }

                    /* If any ticks occurred while the scheduler was suspended then
                     * they should be processed now.  This ensures the tick count does
                     * not  slip, and that any delayed tasks are resumed at the correct
                     * time.
                     *
                     * It should be safe to call xTaskIncrementTick here from any core
                     * since we are in a critical section and xTaskIncrementTick itself
                     * protects itself within a critical section. Suspending the scheduler
                     * from any core causes xTaskIncrementTick to increment uxPendedCounts. */
                    {
                        TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */

                        if( xPendedCounts > ( TickType_t ) 0U )
                        {
                            do
                            {
                                if( xTaskIncrementTick() != pdFALSE )
                                {
                                    /* Other cores are interrupted from
                                     * within xTaskIncrementTick(). */
                                    xYieldPendings[ xCoreID ] = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }

                                --xPendedCounts;
                            } while( xPendedCounts > ( TickType_t ) 0U );

                            xPendedTicks = 0;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    if( xYieldPendings[ xCoreID ] != pdFALSE )
                    {
                        #if ( configUSE_PREEMPTION != 0 )
                        {
                            xAlreadyYielded = pdTRUE;
                        }
                        #endif /* #if ( configUSE_PREEMPTION != 0 ) */

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxCurrentTCB );
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }

    traceRETURN_xTaskResumeAll( xAlreadyYielded );

    return xAlreadyYielded;
}
4072 /*-----------------------------------------------------------*/
4073
4074 TickType_t xTaskGetTickCount( void )
4075 {
4076     TickType_t xTicks;
4077
4078     traceENTER_xTaskGetTickCount();
4079
4080     /* Critical section required if running on a 16 bit processor. */
4081     portTICK_TYPE_ENTER_CRITICAL();
4082     {
4083         xTicks = xTickCount;
4084     }
4085     portTICK_TYPE_EXIT_CRITICAL();
4086
4087     traceRETURN_xTaskGetTickCount( xTicks );
4088
4089     return xTicks;
4090 }
4091 /*-----------------------------------------------------------*/
4092
4093 TickType_t xTaskGetTickCountFromISR( void )
4094 {
4095     TickType_t xReturn;
4096     UBaseType_t uxSavedInterruptStatus;
4097
4098     traceENTER_xTaskGetTickCountFromISR();
4099
4100     /* RTOS ports that support interrupt nesting have the concept of a maximum
4101      * system call (or maximum API call) interrupt priority.  Interrupts that are
4102      * above the maximum system call priority are kept permanently enabled, even
4103      * when the RTOS kernel is in a critical section, but cannot make any calls to
4104      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
4105      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4106      * failure if a FreeRTOS API function is called from an interrupt that has been
4107      * assigned a priority above the configured maximum system call priority.
4108      * Only FreeRTOS functions that end in FromISR can be called from interrupts
4109      * that have been assigned a priority at or (logically) below the maximum
4110      * system call  interrupt priority.  FreeRTOS maintains a separate interrupt
4111      * safe API to ensure interrupt entry is as fast and as simple as possible.
4112      * More information (albeit Cortex-M specific) is provided on the following
4113      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
4114     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4115
4116     uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
4117     {
4118         xReturn = xTickCount;
4119     }
4120     portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
4121
4122     traceRETURN_xTaskGetTickCountFromISR( xReturn );
4123
4124     return xReturn;
4125 }
4126 /*-----------------------------------------------------------*/
4127
/* Return the number of tasks currently known to the kernel (in any state).
 * No critical section is taken - see the comment below.  Note the variable
 * is read directly in both the trace macro and the return statement; it is
 * deliberately not cached in a local. */
UBaseType_t uxTaskGetNumberOfTasks( void )
{
    traceENTER_uxTaskGetNumberOfTasks();

    /* A critical section is not required because the variables are of type
     * BaseType_t. */
    traceRETURN_uxTaskGetNumberOfTasks( uxCurrentNumberOfTasks );

    return uxCurrentNumberOfTasks;
}
4138 /*-----------------------------------------------------------*/
4139
4140 char * pcTaskGetName( TaskHandle_t xTaskToQuery )
4141 {
4142     TCB_t * pxTCB;
4143
4144     traceENTER_pcTaskGetName( xTaskToQuery );
4145
4146     /* If null is passed in here then the name of the calling task is being
4147      * queried. */
4148     pxTCB = prvGetTCBFromHandle( xTaskToQuery );
4149     configASSERT( pxTCB );
4150
4151     traceRETURN_pcTaskGetName( &( pxTCB->pcTaskName[ 0 ] ) );
4152
4153     return &( pxTCB->pcTaskName[ 0 ] );
4154 }
4155 /*-----------------------------------------------------------*/
4156
4157 #if ( INCLUDE_xTaskGetHandle == 1 )
4158
4159     #if ( configNUMBER_OF_CORES == 1 )
4160         static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
4161                                                          const char pcNameToQuery[] )
4162         {
4163             TCB_t * pxNextTCB;
4164             TCB_t * pxFirstTCB;
4165             TCB_t * pxReturn = NULL;
4166             UBaseType_t x;
4167             char cNextChar;
4168             BaseType_t xBreakLoop;
4169
4170             /* This function is called with the scheduler suspended. */
4171
4172             if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
4173             {
4174                 /* MISRA Ref 11.5.3 [Void pointer assignment] */
4175                 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
4176                 /* coverity[misra_c_2012_rule_11_5_violation] */
4177                 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
4178
4179                 do
4180                 {
4181                     /* MISRA Ref 11.5.3 [Void pointer assignment] */
4182                     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
4183                     /* coverity[misra_c_2012_rule_11_5_violation] */
4184                     listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
4185
4186                     /* Check each character in the name looking for a match or
4187                      * mismatch. */
4188                     xBreakLoop = pdFALSE;
4189
4190                     for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
4191                     {
4192                         cNextChar = pxNextTCB->pcTaskName[ x ];
4193
4194                         if( cNextChar != pcNameToQuery[ x ] )
4195                         {
4196                             /* Characters didn't match. */
4197                             xBreakLoop = pdTRUE;
4198                         }
4199                         else if( cNextChar == ( char ) 0x00 )
4200                         {
4201                             /* Both strings terminated, a match must have been
4202                              * found. */
4203                             pxReturn = pxNextTCB;
4204                             xBreakLoop = pdTRUE;
4205                         }
4206                         else
4207                         {
4208                             mtCOVERAGE_TEST_MARKER();
4209                         }
4210
4211                         if( xBreakLoop != pdFALSE )
4212                         {
4213                             break;
4214                         }
4215                     }
4216
4217                     if( pxReturn != NULL )
4218                     {
4219                         /* The handle has been found. */
4220                         break;
4221                     }
4222                 } while( pxNextTCB != pxFirstTCB );
4223             }
4224             else
4225             {
4226                 mtCOVERAGE_TEST_MARKER();
4227             }
4228
4229             return pxReturn;
4230         }
4231     #else /* if ( configNUMBER_OF_CORES == 1 ) */
4232         static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
4233                                                          const char pcNameToQuery[] )
4234         {
4235             TCB_t * pxReturn = NULL;
4236             UBaseType_t x;
4237             char cNextChar;
4238             BaseType_t xBreakLoop;
4239             const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList );
4240             ListItem_t * pxIterator;
4241
4242             /* This function is called with the scheduler suspended. */
4243
4244             if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
4245             {
4246                 for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
4247                 {
4248                     /* MISRA Ref 11.5.3 [Void pointer assignment] */
4249                     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
4250                     /* coverity[misra_c_2012_rule_11_5_violation] */
4251                     TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );
4252
4253                     /* Check each character in the name looking for a match or
4254                      * mismatch. */
4255                     xBreakLoop = pdFALSE;
4256
4257                     for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
4258                     {
4259                         cNextChar = pxTCB->pcTaskName[ x ];
4260
4261                         if( cNextChar != pcNameToQuery[ x ] )
4262                         {
4263                             /* Characters didn't match. */
4264                             xBreakLoop = pdTRUE;
4265                         }
4266                         else if( cNextChar == ( char ) 0x00 )
4267                         {
4268                             /* Both strings terminated, a match must have been
4269                              * found. */
4270                             pxReturn = pxTCB;
4271                             xBreakLoop = pdTRUE;
4272                         }
4273                         else
4274                         {
4275                             mtCOVERAGE_TEST_MARKER();
4276                         }
4277
4278                         if( xBreakLoop != pdFALSE )
4279                         {
4280                             break;
4281                         }
4282                     }
4283
4284                     if( pxReturn != NULL )
4285                     {
4286                         /* The handle has been found. */
4287                         break;
4288                     }
4289                 }
4290             }
4291             else
4292             {
4293                 mtCOVERAGE_TEST_MARKER();
4294             }
4295
4296             return pxReturn;
4297         }
4298     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4299
4300 #endif /* INCLUDE_xTaskGetHandle */
4301 /*-----------------------------------------------------------*/
4302
4303 #if ( INCLUDE_xTaskGetHandle == 1 )
4304
4305     TaskHandle_t xTaskGetHandle( const char * pcNameToQuery )
4306     {
4307         UBaseType_t uxQueue = configMAX_PRIORITIES;
4308         TCB_t * pxTCB;
4309
4310         traceENTER_xTaskGetHandle( pcNameToQuery );
4311
4312         /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
4313         configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
4314
4315         vTaskSuspendAll();
4316         {
4317             /* Search the ready lists. */
4318             do
4319             {
4320                 uxQueue--;
4321                 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
4322
4323                 if( pxTCB != NULL )
4324                 {
4325                     /* Found the handle. */
4326                     break;
4327                 }
4328             } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY );
4329
4330             /* Search the delayed lists. */
4331             if( pxTCB == NULL )
4332             {
4333                 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
4334             }
4335
4336             if( pxTCB == NULL )
4337             {
4338                 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
4339             }
4340
4341             #if ( INCLUDE_vTaskSuspend == 1 )
4342             {
4343                 if( pxTCB == NULL )
4344                 {
4345                     /* Search the suspended list. */
4346                     pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
4347                 }
4348             }
4349             #endif
4350
4351             #if ( INCLUDE_vTaskDelete == 1 )
4352             {
4353                 if( pxTCB == NULL )
4354                 {
4355                     /* Search the deleted list. */
4356                     pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
4357                 }
4358             }
4359             #endif
4360         }
4361         ( void ) xTaskResumeAll();
4362
4363         traceRETURN_xTaskGetHandle( pxTCB );
4364
4365         return pxTCB;
4366     }
4367
4368 #endif /* INCLUDE_xTaskGetHandle */
4369 /*-----------------------------------------------------------*/
4370
4371 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
4372
4373     BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
4374                                       StackType_t ** ppuxStackBuffer,
4375                                       StaticTask_t ** ppxTaskBuffer )
4376     {
4377         BaseType_t xReturn;
4378         TCB_t * pxTCB;
4379
4380         traceENTER_xTaskGetStaticBuffers( xTask, ppuxStackBuffer, ppxTaskBuffer );
4381
4382         configASSERT( ppuxStackBuffer != NULL );
4383         configASSERT( ppxTaskBuffer != NULL );
4384
4385         pxTCB = prvGetTCBFromHandle( xTask );
4386
4387         #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
4388         {
4389             if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
4390             {
4391                 *ppuxStackBuffer = pxTCB->pxStack;
4392                 /* MISRA Ref 11.3.1 [Misaligned access] */
4393                 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
4394                 /* coverity[misra_c_2012_rule_11_3_violation] */
4395                 *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
4396                 xReturn = pdTRUE;
4397             }
4398             else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
4399             {
4400                 *ppuxStackBuffer = pxTCB->pxStack;
4401                 *ppxTaskBuffer = NULL;
4402                 xReturn = pdTRUE;
4403             }
4404             else
4405             {
4406                 xReturn = pdFALSE;
4407             }
4408         }
4409         #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
4410         {
4411             *ppuxStackBuffer = pxTCB->pxStack;
4412             *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
4413             xReturn = pdTRUE;
4414         }
4415         #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
4416
4417         traceRETURN_xTaskGetStaticBuffers( xReturn );
4418
4419         return xReturn;
4420     }
4421
4422 #endif /* configSUPPORT_STATIC_ALLOCATION */
4423 /*-----------------------------------------------------------*/
4424
4425 #if ( configUSE_TRACE_FACILITY == 1 )
4426
    /* Populate pxTaskStatusArray with one TaskStatus_t structure per task in
     * the system, and optionally write the total run time through
     * pulTotalRunTime.  Returns the number of structures written - zero when
     * uxArraySize is too small to hold an entry for every task. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
    {
        /* uxTask counts structures written so far and doubles as the write
         * index; uxQueue iterates the ready lists from highest priority down. */
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        traceENTER_uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );

        /* Suspend the scheduler so the task lists cannot change while they
         * are being walked. */
        vTaskSuspendAll();
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state. */
                do
                {
                    uxQueue--;
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ) );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY );

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state. */
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ) );
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ) );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ) );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ) );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    /* The total run time is only reported if the caller asked
                     * for it. */
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    /* Run time stats are not collected in this configuration,
                     * so report zero. */
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                /* The array is too small for a complete snapshot - uxTask
                 * remains zero. */
                mtCOVERAGE_TEST_MARKER();
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_uxTaskGetSystemState( uxTask );

        return uxTask;
    }
4500
4501 #endif /* configUSE_TRACE_FACILITY */
4502 /*----------------------------------------------------------*/
4503
4504 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
4505
4506     #if ( configNUMBER_OF_CORES == 1 )
4507         TaskHandle_t xTaskGetIdleTaskHandle( void )
4508         {
4509             traceENTER_xTaskGetIdleTaskHandle();
4510
4511             /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
4512              * started, then xIdleTaskHandles will be NULL. */
4513             configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) );
4514
4515             traceRETURN_xTaskGetIdleTaskHandle( xIdleTaskHandles[ 0 ] );
4516
4517             return xIdleTaskHandles[ 0 ];
4518         }
4519     #endif /* if ( configNUMBER_OF_CORES == 1 ) */
4520
4521     TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID )
4522     {
4523         traceENTER_xTaskGetIdleTaskHandleForCore( xCoreID );
4524
4525         /* Ensure the core ID is valid. */
4526         configASSERT( taskVALID_CORE_ID( xCoreID ) == pdTRUE );
4527
4528         /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
4529          * started, then xIdleTaskHandles will be NULL. */
4530         configASSERT( ( xIdleTaskHandles[ xCoreID ] != NULL ) );
4531
4532         traceRETURN_xTaskGetIdleTaskHandleForCore( xIdleTaskHandles[ xCoreID ] );
4533
4534         return xIdleTaskHandles[ xCoreID ];
4535     }
4536
4537 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
4538 /*----------------------------------------------------------*/
4539
4540 /* This conditional compilation should use inequality to 0, not equality to 1.
4541  * This is to ensure vTaskStepTick() is available when user defined low power mode
4542  * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
4543  * 1. */
4544 #if ( configUSE_TICKLESS_IDLE != 0 )
4545
    /* Advance the tick count in a single step after a period during which
     * ticks were suppressed (tickless idle).  The jump must not take the tick
     * count past xNextTaskUnblockTime, otherwise a task timeout would be
     * missed - this is asserted below. */
    void vTaskStepTick( TickType_t xTicksToJump )
    {
        TickType_t xUpdatedTickCount;

        traceENTER_vTaskStepTick( xTicksToJump );

        /* Correct the tick count value after a period during which the tick
         * was suppressed.  Note this does *not* call the tick hook function for
         * each stepped tick. */
        xUpdatedTickCount = xTickCount + xTicksToJump;
        configASSERT( xUpdatedTickCount <= xNextTaskUnblockTime );

        if( xUpdatedTickCount == xNextTaskUnblockTime )
        {
            /* Arrange for xTickCount to reach xNextTaskUnblockTime in
             * xTaskIncrementTick() when the scheduler resumes.  This ensures
             * that any delayed tasks are resumed at the correct time.  One
             * tick is converted into a pended tick and removed from the jump
             * below. */
            configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
            configASSERT( xTicksToJump != ( TickType_t ) 0 );

            /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
            taskENTER_CRITICAL();
            {
                xPendedTicks++;
            }
            taskEXIT_CRITICAL();
            xTicksToJump--;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        xTickCount += xTicksToJump;

        traceINCREASE_TICK_COUNT( xTicksToJump );
        traceRETURN_vTaskStepTick();
    }
4584
4585 #endif /* configUSE_TICKLESS_IDLE */
4586 /*----------------------------------------------------------*/
4587
4588 BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
4589 {
4590     BaseType_t xYieldOccurred;
4591
4592     traceENTER_xTaskCatchUpTicks( xTicksToCatchUp );
4593
4594     /* Must not be called with the scheduler suspended as the implementation
4595      * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
4596     configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
4597
4598     /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
4599      * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
4600     vTaskSuspendAll();
4601
4602     /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
4603     taskENTER_CRITICAL();
4604     {
4605         xPendedTicks += xTicksToCatchUp;
4606     }
4607     taskEXIT_CRITICAL();
4608     xYieldOccurred = xTaskResumeAll();
4609
4610     traceRETURN_xTaskCatchUpTicks( xYieldOccurred );
4611
4612     return xYieldOccurred;
4613 }
4614 /*----------------------------------------------------------*/
4615
4616 #if ( INCLUDE_xTaskAbortDelay == 1 )
4617
    /* Force xTask out of the Blocked state before its block time expires.
     * Returns pdPASS if the task was Blocked and has been moved to the Ready
     * state, or pdFAIL if the task was not in the Blocked state. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        traceENTER_xTaskAbortDelay( xTask );

        /* The handle must not be NULL. */
        configASSERT( pxTCB );

        vTaskSuspendAll();
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list.  An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also?  If so remove it from
                 * the event list too.  Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used. */
                taskENTER_CRITICAL();
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL();

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Preemption is on, but a context switch should only be
                         * performed if the unblocked task has a priority that is
                         * higher than the currently executing task. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            /* Pend the yield to be performed when the scheduler
                             * is unsuspended. */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* In the SMP build the yield decision is delegated to
                         * prvYieldForTask() inside a critical section. */
                        taskENTER_CRITICAL();
                        {
                            prvYieldForTask( pxTCB );
                        }
                        taskEXIT_CRITICAL();
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                #endif /* #if ( configUSE_PREEMPTION == 1 ) */
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_xTaskAbortDelay( xReturn );

        return xReturn;
    }
4708
4709 #endif /* INCLUDE_xTaskAbortDelay */
4710 /*----------------------------------------------------------*/
4711
4712 BaseType_t xTaskIncrementTick( void )
4713 {
4714     TCB_t * pxTCB;
4715     TickType_t xItemValue;
4716     BaseType_t xSwitchRequired = pdFALSE;
4717
4718     #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
4719     BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
4720     #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
4721
4722     traceENTER_xTaskIncrementTick();
4723
4724     /* Called by the portable layer each time a tick interrupt occurs.
4725      * Increments the tick then checks to see if the new tick value will cause any
4726      * tasks to be unblocked. */
4727     traceTASK_INCREMENT_TICK( xTickCount );
4728
4729     /* Tick increment should occur on every kernel timer event. Core 0 has the
4730      * responsibility to increment the tick, or increment the pended ticks if the
4731      * scheduler is suspended.  If pended ticks is greater than zero, the core that
4732      * calls xTaskResumeAll has the responsibility to increment the tick. */
4733     if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
4734     {
4735         /* Minor optimisation.  The tick count cannot change in this
4736          * block. */
4737         const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
4738
4739         /* Increment the RTOS tick, switching the delayed and overflowed
4740          * delayed lists if it wraps to 0. */
4741         xTickCount = xConstTickCount;
4742
4743         if( xConstTickCount == ( TickType_t ) 0U )
4744         {
4745             taskSWITCH_DELAYED_LISTS();
4746         }
4747         else
4748         {
4749             mtCOVERAGE_TEST_MARKER();
4750         }
4751
4752         /* See if this tick has made a timeout expire.  Tasks are stored in
4753          * the  queue in the order of their wake time - meaning once one task
4754          * has been found whose block time has not expired there is no need to
4755          * look any further down the list. */
4756         if( xConstTickCount >= xNextTaskUnblockTime )
4757         {
4758             for( ; ; )
4759             {
4760                 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4761                 {
4762                     /* The delayed list is empty.  Set xNextTaskUnblockTime
4763                      * to the maximum possible value so it is extremely
4764                      * unlikely that the
4765                      * if( xTickCount >= xNextTaskUnblockTime ) test will pass
4766                      * next time through. */
4767                     xNextTaskUnblockTime = portMAX_DELAY;
4768                     break;
4769                 }
4770                 else
4771                 {
4772                     /* The delayed list is not empty, get the value of the
4773                      * item at the head of the delayed list.  This is the time
4774                      * at which the task at the head of the delayed list must
4775                      * be removed from the Blocked state. */
4776                     /* MISRA Ref 11.5.3 [Void pointer assignment] */
4777                     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
4778                     /* coverity[misra_c_2012_rule_11_5_violation] */
4779                     pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
4780                     xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
4781
4782                     if( xConstTickCount < xItemValue )
4783                     {
4784                         /* It is not time to unblock this item yet, but the
4785                          * item value is the time at which the task at the head
4786                          * of the blocked list must be removed from the Blocked
4787                          * state -  so record the item value in
4788                          * xNextTaskUnblockTime. */
4789                         xNextTaskUnblockTime = xItemValue;
4790                         break;
4791                     }
4792                     else
4793                     {
4794                         mtCOVERAGE_TEST_MARKER();
4795                     }
4796
4797                     /* It is time to remove the item from the Blocked state. */
4798                     listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
4799
4800                     /* Is the task waiting on an event also?  If so remove
4801                      * it from the event list. */
4802                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
4803                     {
4804                         listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
4805                     }
4806                     else
4807                     {
4808                         mtCOVERAGE_TEST_MARKER();
4809                     }
4810
4811                     /* Place the unblocked task into the appropriate ready
4812                      * list. */
4813                     prvAddTaskToReadyList( pxTCB );
4814
4815                     /* A task being unblocked cannot cause an immediate
4816                      * context switch if preemption is turned off. */
4817                     #if ( configUSE_PREEMPTION == 1 )
4818                     {
4819                         #if ( configNUMBER_OF_CORES == 1 )
4820                         {
4821                             /* Preemption is on, but a context switch should
4822                              * only be performed if the unblocked task's
4823                              * priority is higher than the currently executing
4824                              * task.
4825                              * The case of equal priority tasks sharing
4826                              * processing time (which happens when both
4827                              * preemption and time slicing are on) is
4828                              * handled below.*/
4829                             if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4830                             {
4831                                 xSwitchRequired = pdTRUE;
4832                             }
4833                             else
4834                             {
4835                                 mtCOVERAGE_TEST_MARKER();
4836                             }
4837                         }
4838                         #else /* #if( configNUMBER_OF_CORES == 1 ) */
4839                         {
4840                             prvYieldForTask( pxTCB );
4841                         }
4842                         #endif /* #if( configNUMBER_OF_CORES == 1 ) */
4843                     }
4844                     #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4845                 }
4846             }
4847         }
4848
4849         /* Tasks of equal priority to the currently running task will share
4850          * processing time (time slice) if preemption is on, and the application
4851          * writer has not explicitly turned time slicing off. */
4852         #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
4853         {
4854             #if ( configNUMBER_OF_CORES == 1 )
4855             {
4856                 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U )
4857                 {
4858                     xSwitchRequired = pdTRUE;
4859                 }
4860                 else
4861                 {
4862                     mtCOVERAGE_TEST_MARKER();
4863                 }
4864             }
4865             #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4866             {
4867                 BaseType_t xCoreID;
4868
4869                 for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ )
4870                 {
4871                     if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1U )
4872                     {
4873                         xYieldRequiredForCore[ xCoreID ] = pdTRUE;
4874                     }
4875                     else
4876                     {
4877                         mtCOVERAGE_TEST_MARKER();
4878                     }
4879                 }
4880             }
4881             #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4882         }
4883         #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
4884
4885         #if ( configUSE_TICK_HOOK == 1 )
4886         {
4887             /* Guard against the tick hook being called when the pended tick
4888              * count is being unwound (when the scheduler is being unlocked). */
4889             if( xPendedTicks == ( TickType_t ) 0 )
4890             {
4891                 vApplicationTickHook();
4892             }
4893             else
4894             {
4895                 mtCOVERAGE_TEST_MARKER();
4896             }
4897         }
4898         #endif /* configUSE_TICK_HOOK */
4899
4900         #if ( configUSE_PREEMPTION == 1 )
4901         {
4902             #if ( configNUMBER_OF_CORES == 1 )
4903             {
4904                 /* For single core the core ID is always 0. */
4905                 if( xYieldPendings[ 0 ] != pdFALSE )
4906                 {
4907                     xSwitchRequired = pdTRUE;
4908                 }
4909                 else
4910                 {
4911                     mtCOVERAGE_TEST_MARKER();
4912                 }
4913             }
4914             #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4915             {
4916                 BaseType_t xCoreID, xCurrentCoreID;
4917                 xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID();
4918
4919                 for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
4920                 {
4921                     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
4922                         if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
4923                     #endif
4924                     {
4925                         if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) )
4926                         {
4927                             if( xCoreID == xCurrentCoreID )
4928                             {
4929                                 xSwitchRequired = pdTRUE;
4930                             }
4931                             else
4932                             {
4933                                 prvYieldCore( xCoreID );
4934                             }
4935                         }
4936                         else
4937                         {
4938                             mtCOVERAGE_TEST_MARKER();
4939                         }
4940                     }
4941                 }
4942             }
4943             #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4944         }
4945         #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4946     }
4947     else
4948     {
4949         ++xPendedTicks;
4950
4951         /* The tick hook gets called at regular intervals, even if the
4952          * scheduler is locked. */
4953         #if ( configUSE_TICK_HOOK == 1 )
4954         {
4955             vApplicationTickHook();
4956         }
4957         #endif
4958     }
4959
4960     traceRETURN_xTaskIncrementTick( xSwitchRequired );
4961
4962     return xSwitchRequired;
4963 }
4964 /*-----------------------------------------------------------*/
4965
4966 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
4967
4968     void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
4969                                      TaskHookFunction_t pxHookFunction )
4970     {
4971         TCB_t * xTCB;
4972
4973         traceENTER_vTaskSetApplicationTaskTag( xTask, pxHookFunction );
4974
4975         /* If xTask is NULL then it is the task hook of the calling task that is
4976          * getting set. */
4977         if( xTask == NULL )
4978         {
4979             xTCB = ( TCB_t * ) pxCurrentTCB;
4980         }
4981         else
4982         {
4983             xTCB = xTask;
4984         }
4985
4986         /* Save the hook function in the TCB.  A critical section is required as
4987          * the value can be accessed from an interrupt. */
4988         taskENTER_CRITICAL();
4989         {
4990             xTCB->pxTaskTag = pxHookFunction;
4991         }
4992         taskEXIT_CRITICAL();
4993
4994         traceRETURN_vTaskSetApplicationTaskTag();
4995     }
4996
4997 #endif /* configUSE_APPLICATION_TASK_TAG */
4998 /*-----------------------------------------------------------*/
4999
5000 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
5001
5002     TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
5003     {
5004         TCB_t * pxTCB;
5005         TaskHookFunction_t xReturn;
5006
5007         traceENTER_xTaskGetApplicationTaskTag( xTask );
5008
5009         /* If xTask is NULL then set the calling task's hook. */
5010         pxTCB = prvGetTCBFromHandle( xTask );
5011
5012         /* Save the hook function in the TCB.  A critical section is required as
5013          * the value can be accessed from an interrupt. */
5014         taskENTER_CRITICAL();
5015         {
5016             xReturn = pxTCB->pxTaskTag;
5017         }
5018         taskEXIT_CRITICAL();
5019
5020         traceRETURN_xTaskGetApplicationTaskTag( xReturn );
5021
5022         return xReturn;
5023     }
5024
5025 #endif /* configUSE_APPLICATION_TASK_TAG */
5026 /*-----------------------------------------------------------*/
5027
5028 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
5029
5030     TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
5031     {
5032         TCB_t * pxTCB;
5033         TaskHookFunction_t xReturn;
5034         UBaseType_t uxSavedInterruptStatus;
5035
5036         traceENTER_xTaskGetApplicationTaskTagFromISR( xTask );
5037
5038         /* If xTask is NULL then set the calling task's hook. */
5039         pxTCB = prvGetTCBFromHandle( xTask );
5040
5041         /* Save the hook function in the TCB.  A critical section is required as
5042          * the value can be accessed from an interrupt. */
5043         uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
5044         {
5045             xReturn = pxTCB->pxTaskTag;
5046         }
5047         taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
5048
5049         traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn );
5050
5051         return xReturn;
5052     }
5053
5054 #endif /* configUSE_APPLICATION_TASK_TAG */
5055 /*-----------------------------------------------------------*/
5056
5057 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
5058
5059     BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
5060                                              void * pvParameter )
5061     {
5062         TCB_t * xTCB;
5063         BaseType_t xReturn;
5064
5065         traceENTER_xTaskCallApplicationTaskHook( xTask, pvParameter );
5066
5067         /* If xTask is NULL then we are calling our own task hook. */
5068         if( xTask == NULL )
5069         {
5070             xTCB = pxCurrentTCB;
5071         }
5072         else
5073         {
5074             xTCB = xTask;
5075         }
5076
5077         if( xTCB->pxTaskTag != NULL )
5078         {
5079             xReturn = xTCB->pxTaskTag( pvParameter );
5080         }
5081         else
5082         {
5083             xReturn = pdFAIL;
5084         }
5085
5086         traceRETURN_xTaskCallApplicationTaskHook( xReturn );
5087
5088         return xReturn;
5089     }
5090
5091 #endif /* configUSE_APPLICATION_TASK_TAG */
5092 /*-----------------------------------------------------------*/
5093
5094 #if ( configNUMBER_OF_CORES == 1 )
    /* Perform a context switch on the single core: select the highest
     * priority ready task and make it the current task.  If the scheduler is
     * suspended the switch is deferred by setting xYieldPendings[ 0 ] so it
     * occurs when the scheduler is resumed. */
    void vTaskSwitchContext( void )
    {
        traceENTER_vTaskSwitchContext();

        if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
        {
            /* The scheduler is currently suspended - do not allow a context
             * switch.  Record that a yield is pending instead. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xYieldPendings[ 0 ] = pdFALSE;
            traceTASK_SWITCHED_OUT();

            #if ( configGENERATE_RUN_TIME_STATS == 1 )
            {
                #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                    portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] );
                #else
                    ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE();
                #endif

                /* Add the amount of time the task has been running to the
                 * accumulated time so far.  The time the task started running was
                 * stored in ulTaskSwitchedInTime.  Note that there is no overflow
                 * protection here so count values are only valid until the timer
                 * overflows.  The guard against negative values is to protect
                 * against suspect run time stat counter implementations - which
                 * are provided by the application, not the kernel. */
                if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] )
                {
                    pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ];
            }
            #endif /* configGENERATE_RUN_TIME_STATS */

            /* Check for stack overflow, if configured. */
            taskCHECK_FOR_STACK_OVERFLOW();

            /* Before the currently running task is switched out, save its errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
            }
            #endif

            /* Select a new task to run using either the generic C or port
             * optimised asm code.  This updates pxCurrentTCB. */
            /* MISRA Ref 11.5.3 [Void pointer assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            taskSELECT_HIGHEST_PRIORITY_TASK();
            traceTASK_SWITCHED_IN();

            /* After the new task is switched in, update the global errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
            }
            #endif

            #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
            {
                /* Switch C-Runtime's TLS Block to point to the TLS
                 * Block specific to this task. */
                configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
            }
            #endif
        }

        traceRETURN_vTaskSwitchContext();
    }
5174 #else /* if ( configNUMBER_OF_CORES == 1 ) */
    /* SMP variant: perform a context switch for the core identified by
     * xCoreID, selecting the highest priority ready task for that core.  If
     * the scheduler is suspended (by any core) the switch is deferred by
     * setting xYieldPendings[ xCoreID ]. */
    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        traceENTER_vTaskSwitchContext();

        /* Acquire both locks:
         * - The ISR lock protects the ready list from simultaneous access by
         *   both other ISRs and tasks.
         * - We also take the task lock to pause here in case another core has
         *   suspended the scheduler. We don't want to simply set xYieldPending
         *   and move on if another core suspended the scheduler. We should only
         *   do that if the current core has suspended the scheduler. */

        portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
        portGET_ISR_LOCK();
        {
            /* vTaskSwitchContext() must never be called from within a critical section.
             * This is not necessarily true for single core FreeRTOS, but it is for this
             * SMP port. */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );

            if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
            {
                /* The scheduler is currently suspended - do not allow a context
                 * switch.  Record that a yield is pending for this core instead. */
                xYieldPendings[ xCoreID ] = pdTRUE;
            }
            else
            {
                xYieldPendings[ xCoreID ] = pdFALSE;
                traceTASK_SWITCHED_OUT();

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                        portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] );
                    #else
                        ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE();
                    #endif

                    /* Add the amount of time the task has been running to the
                     * accumulated time so far.  The time the task started running was
                     * stored in ulTaskSwitchedInTime.  Note that there is no overflow
                     * protection here so count values are only valid until the timer
                     * overflows.  The guard against negative values is to protect
                     * against suspect run time stat counter implementations - which
                     * are provided by the application, not the kernel. */
                    if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] )
                    {
                        pxCurrentTCBs[ xCoreID ]->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ];
                }
                #endif /* configGENERATE_RUN_TIME_STATS */

                /* Check for stack overflow, if configured. */
                taskCHECK_FOR_STACK_OVERFLOW();

                /* Before the currently running task is switched out, save its errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    pxCurrentTCBs[ xCoreID ]->iTaskErrno = FreeRTOS_errno;
                }
                #endif

                /* Select a new task to run.  This updates pxCurrentTCBs[ xCoreID ]. */
                taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID );
                traceTASK_SWITCHED_IN();

                /* After the new task is switched in, update the global errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    FreeRTOS_errno = pxCurrentTCBs[ xCoreID ]->iTaskErrno;
                }
                #endif

                #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
                {
                    /* Switch C-Runtime's TLS Block to point to the TLS
                     * Block specific to this task. */
                    configSET_TLS_BLOCK( pxCurrentTCBs[ xCoreID ]->xTLSBlock );
                }
                #endif
            }
        }
        portRELEASE_ISR_LOCK();
        portRELEASE_TASK_LOCK();

        traceRETURN_vTaskSwitchContext();
    }
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
5270 /*-----------------------------------------------------------*/
5271
5272 void vTaskPlaceOnEventList( List_t * const pxEventList,
5273                             const TickType_t xTicksToWait )
5274 {
5275     traceENTER_vTaskPlaceOnEventList( pxEventList, xTicksToWait );
5276
5277     configASSERT( pxEventList );
5278
5279     /* THIS FUNCTION MUST BE CALLED WITH THE
5280      * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
5281
5282     /* Place the event list item of the TCB in the appropriate event list.
5283      * This is placed in the list in priority order so the highest priority task
5284      * is the first to be woken by the event.
5285      *
5286      * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
5287      * Normally, the xItemValue of a TCB's ListItem_t members is:
5288      *      xItemValue = ( configMAX_PRIORITIES - uxPriority )
5289      * Therefore, the event list is sorted in descending priority order.
5290      *
5291      * The queue that contains the event list is locked, preventing
5292      * simultaneous access from interrupts. */
5293     vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
5294
5295     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
5296
5297     traceRETURN_vTaskPlaceOnEventList();
5298 }
5299 /*-----------------------------------------------------------*/
5300
5301 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
5302                                      const TickType_t xItemValue,
5303                                      const TickType_t xTicksToWait )
5304 {
5305     traceENTER_vTaskPlaceOnUnorderedEventList( pxEventList, xItemValue, xTicksToWait );
5306
5307     configASSERT( pxEventList );
5308
5309     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
5310      * the event groups implementation. */
5311     configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
5312
5313     /* Store the item value in the event list item.  It is safe to access the
5314      * event list item here as interrupts won't access the event list item of a
5315      * task that is not in the Blocked state. */
5316     listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
5317
5318     /* Place the event list item of the TCB at the end of the appropriate event
5319      * list.  It is safe to access the event list here because it is part of an
5320      * event group implementation - and interrupts don't access event groups
5321      * directly (instead they access them indirectly by pending function calls to
5322      * the task level). */
5323     listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
5324
5325     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
5326
5327     traceRETURN_vTaskPlaceOnUnorderedEventList();
5328 }
5329 /*-----------------------------------------------------------*/
5330
5331 #if ( configUSE_TIMERS == 1 )
5332
5333     void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
5334                                           TickType_t xTicksToWait,
5335                                           const BaseType_t xWaitIndefinitely )
5336     {
5337         traceENTER_vTaskPlaceOnEventListRestricted( pxEventList, xTicksToWait, xWaitIndefinitely );
5338
5339         configASSERT( pxEventList );
5340
5341         /* This function should not be called by application code hence the
5342          * 'Restricted' in its name.  It is not part of the public API.  It is
5343          * designed for use by kernel code, and has special calling requirements -
5344          * it should be called with the scheduler suspended. */
5345
5346
5347         /* Place the event list item of the TCB in the appropriate event list.
5348          * In this case it is assume that this is the only task that is going to
5349          * be waiting on this event list, so the faster vListInsertEnd() function
5350          * can be used in place of vListInsert. */
5351         listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
5352
5353         /* If the task should block indefinitely then set the block time to a
5354          * value that will be recognised as an indefinite delay inside the
5355          * prvAddCurrentTaskToDelayedList() function. */
5356         if( xWaitIndefinitely != pdFALSE )
5357         {
5358             xTicksToWait = portMAX_DELAY;
5359         }
5360
5361         traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
5362         prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
5363
5364         traceRETURN_vTaskPlaceOnEventListRestricted();
5365     }
5366
5367 #endif /* configUSE_TIMERS */
5368 /*-----------------------------------------------------------*/
5369
/* Unblock the highest priority task waiting on pxEventList and move it to
 * the ready list (or the pending-ready list if the scheduler is suspended).
 * Returns pdTRUE if the unblocked task should pre-empt the caller - i.e. the
 * caller should request a context switch - and pdFALSE otherwise. */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
    TCB_t * pxUnblockedTCB;
    BaseType_t xReturn;

    traceENTER_xTaskRemoveFromEventList( pxEventList );

    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
     * called from a critical section within an ISR. */

    /* The event list is sorted in priority order, so the first in the list can
     * be removed as it is known to be the highest priority.  Remove the TCB from
     * the delayed list, and add it to the ready list.
     *
     * If an event is for a queue that is locked then this function will never
     * get called - the lock count on the queue will get modified instead.  This
     * means exclusive access to the event list is guaranteed here.
     *
     * This function assumes that a check has already been made to ensure that
     * pxEventList is not empty. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        /* Scheduler running - the ready list can be updated directly. */
        listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
        prvAddTaskToReadyList( pxUnblockedTCB );

        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            /* If a task is blocked on a kernel object then xNextTaskUnblockTime
             * might be set to the blocked task's time out time.  If the task is
             * unblocked for a reason other than a timeout xNextTaskUnblockTime is
             * normally left unchanged, because it is automatically reset to a new
             * value when the tick count equals xNextTaskUnblockTime.  However if
             * tickless idling is used it might be more important to enter sleep mode
             * at the earliest possible time - so reset xNextTaskUnblockTime here to
             * ensure it is updated at the earliest possible time. */
            prvResetNextTaskUnblockTime();
        }
        #endif
    }
    else
    {
        /* The delayed and ready lists cannot be accessed, so hold this task
         * pending until the scheduler is resumed. */
        listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
    }

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* Return true if the task removed from the event list has a higher
             * priority than the calling task.  This allows the calling task to know if
             * it should force a context switch now. */
            xReturn = pdTRUE;

            /* Mark that a yield is pending in case the user is not using the
             * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        xReturn = pdFALSE;

        #if ( configUSE_PREEMPTION == 1 )
        {
            /* May set a yield pending on this or another core; a pending
             * yield on the calling core is reported through the return
             * value. */
            prvYieldForTask( pxUnblockedTCB );

            if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
            {
                xReturn = pdTRUE;
            }
        }
        #endif /* #if ( configUSE_PREEMPTION == 1 ) */
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_xTaskRemoveFromEventList( xReturn );
    return xReturn;
}
5461 /*-----------------------------------------------------------*/
5462
/* Unblock the task that owns pxEventListItem (used by the event groups
 * implementation), storing xItemValue in the item and moving the task to
 * the ready list.  A required context switch is signalled via
 * xYieldPendings rather than a return value because the scheduler is
 * suspended while this runs. */
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue )
{
    TCB_t * pxUnblockedTCB;

    traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue );

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
     * the event flags implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the new item value in the event list. */
    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Remove the event list item from the event flag.  Interrupts do not
     * access event flags. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( pxEventListItem );

    #if ( configUSE_TICKLESS_IDLE != 0 )
    {
        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
         * might be set to the blocked task's time out time.  If the task is
         * unblocked for a reason other than a timeout xNextTaskUnblockTime is
         * normally left unchanged, because it is automatically reset to a new
         * value when the tick count equals xNextTaskUnblockTime.  However if
         * tickless idling is used it might be more important to enter sleep mode
         * at the earliest possible time - so reset xNextTaskUnblockTime here to
         * ensure it is updated at the earliest possible time. */
        prvResetNextTaskUnblockTime();
    }
    #endif

    /* Remove the task from the delayed list and add it to the ready list.  The
     * scheduler is suspended so interrupts will not be accessing the ready
     * lists. */
    listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
    prvAddTaskToReadyList( pxUnblockedTCB );

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* The unblocked task has a priority above that of the calling task, so
             * a context switch is required.  This function is called with the
             * scheduler suspended so xYieldPending is set so the context switch
             * occurs immediately that the scheduler is resumed (unsuspended). */
            xYieldPendings[ 0 ] = pdTRUE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        #if ( configUSE_PREEMPTION == 1 )
        {
            /* On SMP the yield decision is delegated to prvYieldForTask(),
             * which must run inside a critical section. */
            taskENTER_CRITICAL();
            {
                prvYieldForTask( pxUnblockedTCB );
            }
            taskEXIT_CRITICAL();
        }
        #endif
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskRemoveFromUnorderedEventList();
}
5533 /*-----------------------------------------------------------*/
5534
5535 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
5536 {
5537     traceENTER_vTaskSetTimeOutState( pxTimeOut );
5538
5539     configASSERT( pxTimeOut );
5540     taskENTER_CRITICAL();
5541     {
5542         pxTimeOut->xOverflowCount = xNumOfOverflows;
5543         pxTimeOut->xTimeOnEntering = xTickCount;
5544     }
5545     taskEXIT_CRITICAL();
5546
5547     traceRETURN_vTaskSetTimeOutState();
5548 }
5549 /*-----------------------------------------------------------*/
5550
5551 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
5552 {
5553     traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );
5554
5555     /* For internal use only as it does not use a critical section. */
5556     pxTimeOut->xOverflowCount = xNumOfOverflows;
5557     pxTimeOut->xTimeOnEntering = xTickCount;
5558
5559     traceRETURN_vTaskInternalSetTimeOutState();
5560 }
5561 /*-----------------------------------------------------------*/
5562
/* Determine whether the block period captured in pxTimeOut has expired.
 * Returns pdTRUE on timeout (with *pxTicksToWait zeroed, except in the
 * aborted-delay case), or pdFALSE with *pxTicksToWait reduced to the time
 * remaining and pxTimeOut restarted. */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    traceENTER_xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL();
    {
        /* Minor optimisation.  The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        /* NOTE: the two optional 'if/else' fragments below deliberately
         * chain onto the unconditional 'if' that follows them - each #if
         * block ends with a dangling 'else'.  Preserve this structure
         * exactly when editing. */
        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result.  Clear the flag so it only reports
                 * once. */
                pxCurrentTCB->ucDelayAborted = pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) )
        {
            /* The tick count is greater than the time at which
             * vTaskSetTimeout() was called, but has also overflowed since
             * vTaskSetTimeOut() was called.  It must have wrapped all the way
             * around and gone past again - more than the block period has
             * therefore passed since vTaskSetTimeout() was called. */
            xReturn = pdTRUE;
            *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait )
        {
            /* Not a genuine timeout. Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            /* The block period has expired without a wrap-around. */
            *pxTicksToWait = ( TickType_t ) 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    traceRETURN_xTaskCheckForTimeOut( xReturn );

    return xReturn;
}
5630 /*-----------------------------------------------------------*/
5631
5632 void vTaskMissedYield( void )
5633 {
5634     traceENTER_vTaskMissedYield();
5635
5636     /* Must be called from within a critical section. */
5637     xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
5638
5639     traceRETURN_vTaskMissedYield();
5640 }
5641 /*-----------------------------------------------------------*/
5642
#if ( configUSE_TRACE_FACILITY == 1 )

/* Return the trace-facility task number previously stored in the TCB of
 * xTask (see vTaskSetTaskNumber()), or 0 when xTask is NULL. */
    UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
    {
        TCB_t const * pxQueriedTCB;
        UBaseType_t uxNumber = 0U;

        traceENTER_uxTaskGetTaskNumber( xTask );

        /* A NULL handle has no TCB to read - report 0 in that case. */
        if( xTask != NULL )
        {
            pxQueriedTCB = xTask;
            uxNumber = pxQueriedTCB->uxTaskNumber;
        }

        traceRETURN_uxTaskGetTaskNumber( uxNumber );

        return uxNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
5668 /*-----------------------------------------------------------*/
5669
#if ( configUSE_TRACE_FACILITY == 1 )

/* Store uxHandle as the trace-facility task number in the TCB of xTask.
 * A NULL handle is silently ignored - there is no "calling task" fallback
 * for this setter. */
    void vTaskSetTaskNumber( TaskHandle_t xTask,
                             const UBaseType_t uxHandle )
    {
        TCB_t * pxOwnerTCB;

        traceENTER_vTaskSetTaskNumber( xTask, uxHandle );

        if( xTask != NULL )
        {
            pxOwnerTCB = xTask;
            pxOwnerTCB->uxTaskNumber = uxHandle;
        }

        traceRETURN_vTaskSetTaskNumber();
    }

#endif /* configUSE_TRACE_FACILITY */
5689 /*-----------------------------------------------------------*/
5690
5691 /*
5692  * -----------------------------------------------------------
5693  * The passive idle task.
5694  * ----------------------------------------------------------
5695  *
5696  * The passive idle task is used for all the additional cores in a SMP
5697  * system. There must be only 1 active idle task and the rest are passive
5698  * idle tasks.
5699  *
5700  * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5701  * language extensions.  The equivalent prototype for this function is:
5702  *
5703  * void prvPassiveIdleTask( void *pvParameters );
5704  */
5705
#if ( configNUMBER_OF_CORES > 1 )

/* Idle loop run by every core other than the one running prvIdleTask.  It
 * never blocks; it only yields so that ready application tasks can run. */
    static portTASK_FUNCTION( prvPassiveIdleTask, pvParameters )
    {
        /* The idle task parameter is unused. */
        ( void ) pvParameters;

        /* All cores start up running an idle task - yield immediately so any
         * ready application task gets the core. */
        taskYIELD();

        for( ; configCONTROL_INFINITE_LOOP(); )
        {
            #if ( configUSE_PREEMPTION == 0 )
            {
                /* If we are not using preemption we keep forcing a task switch to
                 * see if any other task has become available.  If we are using
                 * preemption we don't need to do this as any task becoming available
                 * will automatically get the processor anyway. */
                taskYIELD();
            }
            #endif /* configUSE_PREEMPTION */

            #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
            {
                /* When using preemption tasks of equal priority will be
                 * timesliced.  If a task that is sharing the idle priority is ready
                 * to run then the idle task should yield before the end of the
                 * timeslice.
                 *
                 * A critical region is not required here as we are just reading from
                 * the list, and an occasional incorrect value will not matter.  If
                 * the ready list at the idle priority contains one more task than the
                 * number of idle tasks, which is equal to the configured numbers of cores
                 * then a task other than the idle task is ready to execute. */
                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
                {
                    taskYIELD();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

            #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
            {
                /* Call the user defined function from within the idle task.  This
                 * allows the application designer to add background functionality
                 * without the overhead of a separate task.
                 *
                 * This hook is intended to manage core activity such as disabling cores that go idle.
                 *
                 * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
                 * CALL A FUNCTION THAT MIGHT BLOCK. */
                vApplicationPassiveIdleHook();
            }
            #endif /* configUSE_PASSIVE_IDLE_HOOK */
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
5764
5765 /*
5766  * -----------------------------------------------------------
5767  * The idle task.
5768  * ----------------------------------------------------------
5769  *
5770  * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5771  * language extensions.  The equivalent prototype for this function is:
5772  *
5773  * void prvIdleTask( void *pvParameters );
5774  *
5775  */
5776
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    #if ( configNUMBER_OF_CORES > 1 )
    {
        /* SMP all cores start up in the idle task. This initial yield gets the application
         * tasks started. */
        taskYIELD();
    }
    #endif /* #if ( configNUMBER_OF_CORES > 1 ) */

    for( ; configCONTROL_INFINITE_LOOP(); )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available.  If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced.  If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter.  If
             * the ready list at the idle priority contains one more task than the
             * number of idle tasks, which is equal to the configured numbers of cores
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            /* Call the user defined function from within the idle task.
             * NOTE(review): like the passive idle hook further below, this is
             * expected not to call anything that might block - confirm against
             * the vApplicationIdleHook() documentation. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */

        /* This conditional compilation should use inequality to 0, not equality
         * to 1.  This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode  implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task.  Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended.  The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                vTaskSuspendAll();
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    /* The expected idle time is re-checked because the hook
                     * macro above may have reduced it. */
                    if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */

        #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) )
        {
            /* Call the user defined function from within the idle task.  This
             * allows the application designer to add background functionality
             * without the overhead of a separate task.
             *
             * This hook is intended to manage core activity such as disabling cores that go idle.
             *
             * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
             * CALL A FUNCTION THAT MIGHT BLOCK. */
            vApplicationPassiveIdleHook();
        }
        #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) ) */
    }
}
5909 /*-----------------------------------------------------------*/
5910
#if ( configUSE_TICKLESS_IDLE != 0 )

/* Called from within a critical section just before entering tickless idle
 * to decide whether sleeping is still safe: eAbortSleep cancels the sleep,
 * eStandardSleep allows a timed sleep, and eNoTasksWaitingTimeout permits an
 * indefinite sleep because no task has a timeout pending. */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        #if ( INCLUDE_vTaskSuspend == 1 )
            /* The idle task exists in addition to the application tasks. */
            const UBaseType_t uxNonApplicationTasks = configNUMBER_OF_CORES;
        #endif /* INCLUDE_vTaskSuspend */

        eSleepModeStatus eReturn = eStandardSleep;

        traceENTER_eTaskConfirmSleepModeStatus();

        /* This function must be called from a critical section. */

        if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xPendedTicks != 0U )
        {
            /* A tick interrupt has already occurred but was held pending
             * because the scheduler is suspended. */
            eReturn = eAbortSleep;
        }

        /* Note the following branch extends the else-if chain above - it is
         * only wrapped in conditional compilation, not a new statement. */
        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                /* If all the tasks are in the suspended list (which might mean they
                 * have an infinite block time rather than actually being suspended)
                 * then it is safe to turn all clocks off and just wait for external
                 * interrupts. */
                eReturn = eNoTasksWaitingTimeout;
            }
        #endif /* INCLUDE_vTaskSuspend */
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_eTaskConfirmSleepModeStatus( eReturn );

        return eReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
5964 /*-----------------------------------------------------------*/
5965
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

/* Store pvValue at slot xIndex of the thread-local storage array held in the
 * TCB of xTaskToSet.  A NULL handle selects the calling task.  Out-of-range
 * indices are ignored rather than asserted on. */
    void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
                                            BaseType_t xIndex,
                                            void * pvValue )
    {
        TCB_t * pxTargetTCB;

        traceENTER_vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );

        if( ( xIndex >= 0 ) &&
            ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
        {
            /* NULL means "the calling task". */
            pxTargetTCB = prvGetTCBFromHandle( xTaskToSet );
            configASSERT( pxTargetTCB != NULL );

            pxTargetTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
        }

        traceRETURN_vTaskSetThreadLocalStoragePointer();
    }

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
5988 /*-----------------------------------------------------------*/
5989
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

/* Read the pointer stored at slot xIndex of the thread-local storage array
 * held in the TCB of xTaskToQuery.  A NULL handle selects the calling task.
 * Returns NULL when xIndex is out of range. */
    void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
                                               BaseType_t xIndex )
    {
        TCB_t * pxQueriedTCB;
        void * pvResult = NULL;

        traceENTER_pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );

        if( ( xIndex >= 0 ) &&
            ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
        {
            /* NULL queries the calling task's TCB. */
            pxQueriedTCB = prvGetTCBFromHandle( xTaskToQuery );
            pvResult = pxQueriedTCB->pvThreadLocalStoragePointers[ xIndex ];
        }

        traceRETURN_pvTaskGetThreadLocalStoragePointer( pvResult );

        return pvResult;
    }

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
6017 /*-----------------------------------------------------------*/
6018
#if ( portUSING_MPU_WRAPPERS == 1 )

/* Replace the MPU region definitions associated with xTaskToModify with
 * those described by pxRegions.  A NULL handle selects the calling task, so
 * a task can modify its own MPU settings. */
    void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
                                  const MemoryRegion_t * const pxRegions )
    {
        TCB_t * pxTargetTCB;

        traceENTER_vTaskAllocateMPURegions( xTaskToModify, pxRegions );

        pxTargetTCB = prvGetTCBFromHandle( xTaskToModify );

        /* The port layer programs the MPU settings held in the TCB. */
        vPortStoreTaskMPUSettings( &( pxTargetTCB->xMPUSettings ), pxRegions, NULL, 0 );

        traceRETURN_vTaskAllocateMPURegions();
    }

#endif /* portUSING_MPU_WRAPPERS */
6038 /*-----------------------------------------------------------*/
6039
6040 static void prvInitialiseTaskLists( void )
6041 {
6042     UBaseType_t uxPriority;
6043
6044     for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
6045     {
6046         vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
6047     }
6048
6049     vListInitialise( &xDelayedTaskList1 );
6050     vListInitialise( &xDelayedTaskList2 );
6051     vListInitialise( &xPendingReadyList );
6052
6053     #if ( INCLUDE_vTaskDelete == 1 )
6054     {
6055         vListInitialise( &xTasksWaitingTermination );
6056     }
6057     #endif /* INCLUDE_vTaskDelete */
6058
6059     #if ( INCLUDE_vTaskSuspend == 1 )
6060     {
6061         vListInitialise( &xSuspendedTaskList );
6062     }
6063     #endif /* INCLUDE_vTaskSuspend */
6064
6065     /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
6066      * using list2. */
6067     pxDelayedTaskList = &xDelayedTaskList1;
6068     pxOverflowDelayedTaskList = &xDelayedTaskList2;
6069 }
6070 /*-----------------------------------------------------------*/
6071
/* Free the TCB and stack of any task that deleted itself.  The deleting task
 * cannot free its own resources (it is still running on them), so they are
 * queued on xTasksWaitingTermination for the idle task to reclaim here. */
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                /* Only the list manipulation happens inside the critical
                 * section; the actual memory release in prvDeleteTCB() is
                 * performed after the critical section is exited. */
                taskENTER_CRITICAL();
                {
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
                        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                        --uxCurrentNumberOfTasks;
                        --uxDeletedTasksWaitingCleanUp;
                    }
                }
                taskEXIT_CRITICAL();

                prvDeleteTCB( pxTCB );
            }
            #else /* #if( configNUMBER_OF_CORES == 1 ) */
            {
                pxTCB = NULL;

                taskENTER_CRITICAL();
                {
                    /* For SMP, multiple idles can be running simultaneously
                     * and we need to check that other idles did not cleanup while we were
                     * waiting to enter the critical section. */
                    if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );

                        if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                        {
                            ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                            --uxCurrentNumberOfTasks;
                            --uxDeletedTasksWaitingCleanUp;
                        }
                        else
                        {
                            /* The TCB to be deleted still has not yet been switched out
                             * by the scheduler, so we will just exit this loop early and
                             * try again next time. */
                            taskEXIT_CRITICAL();
                            break;
                        }
                    }
                }
                taskEXIT_CRITICAL();

                /* pxTCB is only non-NULL here when this core removed the task
                 * from the termination list above, so the free happens once. */
                if( pxTCB != NULL )
                {
                    prvDeleteTCB( pxTCB );
                }
            }
            #endif /* #if( configNUMBER_OF_CORES == 1 ) */
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
6146 /*-----------------------------------------------------------*/
6147
#if ( configUSE_TRACE_FACILITY == 1 )

/* Populate *pxTaskStatus with a snapshot of xTask's state.  Passing a known
 * eState avoids the cost of deriving it; pass eInvalid to have it looked up.
 * xGetFreeStackSpace likewise allows the (slow) stack high-water-mark scan
 * to be skipped. */
    void vTaskGetInfo( TaskHandle_t xTask,
                       TaskStatus_t * pxTaskStatus,
                       BaseType_t xGetFreeStackSpace,
                       eTaskState eState )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );

        /* If xTask is NULL then get the state of the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        pxTaskStatus->xHandle = pxTCB;
        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
            pxTaskStatus->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
            pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
        #endif
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
        {
            pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
        }
        #endif

        #if ( configUSE_MUTEXES == 1 )
        {
            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
        }
        #else
        {
            /* Without mutexes there is no priority inheritance, so no
             * separate base priority is tracked. */
            pxTaskStatus->uxBasePriority = 0;
        }
        #endif

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
        }
        #else
        {
            pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
        }
        #endif

        /* Obtaining the task state is a little fiddly, so is only done if the
         * value of eState passed into this function is eInvalid - otherwise the
         * state is just set to whatever is passed in. */
        if( eState != eInvalid )
        {
            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
            {
                pxTaskStatus->eCurrentState = eRunning;
            }
            else
            {
                pxTaskStatus->eCurrentState = eState;

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* If the task is in the suspended list then there is a
                     *  chance it is actually just blocked indefinitely - so really
                     *  it should be reported as being in the Blocked state. */
                    if( eState == eSuspended )
                    {
                        vTaskSuspendAll();
                        {
                            /* A non-NULL event list container means the task
                             * is waiting on an RTOS object, i.e. blocked. */
                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                            {
                                pxTaskStatus->eCurrentState = eBlocked;
                            }
                            else
                            {
                                BaseType_t x;

                                /* The task does not appear on the event list of
                                 * any of the RTOS objects, but could still be in the
                                 * blocked state if it is waiting on its notification
                                 * rather than waiting on an object.  If not, it is
                                 * suspended. */
                                for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                                {
                                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                    {
                                        pxTaskStatus->eCurrentState = eBlocked;
                                        break;
                                    }
                                }
                            }
                        }
                        ( void ) xTaskResumeAll();
                    }
                }
                #endif /* INCLUDE_vTaskSuspend */

                /* Tasks can be in pending ready list and other state list at the
                 * same time. These tasks are in ready state no matter what state
                 * list the task is in. */
                taskENTER_CRITICAL();
                {
                    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
                    {
                        pxTaskStatus->eCurrentState = eReady;
                    }
                }
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
        }

        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
         * parameter is provided to allow it to be skipped. */
        if( xGetFreeStackSpace != pdFALSE )
        {
            /* Scan from the end of the stack furthest from the current top,
             * where unused fill bytes remain. */
            #if ( portSTACK_GROWTH > 0 )
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
            }
            #else
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
            }
            #endif
        }
        else
        {
            pxTaskStatus->usStackHighWaterMark = 0;
        }

        traceRETURN_vTaskGetInfo();
    }

#endif /* configUSE_TRACE_FACILITY */
6289 /*-----------------------------------------------------------*/
6290
#if ( configUSE_TRACE_FACILITY == 1 )

/* Fill consecutive entries of pxTaskStatusArray with the status of every task
 * on pxList, marking each with eState.  Returns the number of entries written.
 * The caller must guarantee the array is large enough and that the list
 * cannot change while it is being walked (e.g. scheduler suspended). */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState )
    {
        configLIST_VOLATILE TCB_t * pxNextTCB;
        configLIST_VOLATILE TCB_t * pxFirstTCB;
        UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first owner visited so the do/while below knows
             * when the walk has wrapped around the whole list. */
            /* MISRA Ref 11.5.3 [Void pointer assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

            /* Populate a TaskStatus_t structure within the
             * pxTaskStatusArray array for each task that is referenced from
             * pxList.  See the definition of TaskStatus_t in task.h for the
             * meaning of each TaskStatus_t structure member. */
            do
            {
                /* MISRA Ref 11.5.3 [Void pointer assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
6331 /*-----------------------------------------------------------*/
6332
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )

/* Count the StackType_t words at the never-used end of a stack that still
 * hold the tskSTACK_FILL_BYTE pattern written when the stack was created.
 * pucStackByte points at the end of the stack furthest from the current top;
 * stepping by -portSTACK_GROWTH walks towards the in-use region. */
    static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
    {
        uint32_t ulUntouchedBytes;

        for( ulUntouchedBytes = 0U; *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE; ulUntouchedBytes++ )
        {
            pucStackByte -= portSTACK_GROWTH;
        }

        /* Convert the byte count into a count of whole stack words. */
        return ( configSTACK_DEPTH_TYPE ) ( ulUntouchedBytes / ( uint32_t ) sizeof( StackType_t ) );
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
6351 /*-----------------------------------------------------------*/
6352
#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )

/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
 * same except for their return type.  Using configSTACK_DEPTH_TYPE allows the
 * user to determine the return type.  It gets around the problem of the value
 * overflowing on 8-bit types without breaking backward compatibility for
 * applications that expect an 8-bit return type. */
    configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        uint8_t * pucStackLimit;
        configSTACK_DEPTH_TYPE uxHighWaterMark;

        traceENTER_uxTaskGetStackHighWaterMark2( xTask );

        /* A NULL handle queries the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Locate the end of the stack furthest from the current top of stack -
         * that is where untouched fill bytes remain. */
        #if portSTACK_GROWTH < 0
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxHighWaterMark = prvTaskCheckFreeStackSpace( pucStackLimit );

        traceRETURN_uxTaskGetStackHighWaterMark2( uxHighWaterMark );

        return uxHighWaterMark;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
6395 /*-----------------------------------------------------------*/
6396
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

/* Legacy variant of uxTaskGetStackHighWaterMark2() that returns UBaseType_t.
 * Returns the minimum amount of stack (in words) that has ever been unused
 * during the lifetime of xTask; NULL queries the calling task. */
    UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        uint8_t * pucStackLimit;
        UBaseType_t uxHighWaterMark;

        traceENTER_uxTaskGetStackHighWaterMark( xTask );

        pxTCB = prvGetTCBFromHandle( xTask );

        /* Scan from the end of the stack furthest from the current top of
         * stack, where untouched fill bytes remain. */
        #if portSTACK_GROWTH < 0
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxHighWaterMark = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucStackLimit );

        traceRETURN_uxTaskGetStackHighWaterMark( uxHighWaterMark );

        return uxHighWaterMark;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
6427 /*-----------------------------------------------------------*/
6428
#if ( INCLUDE_vTaskDelete == 1 )

/* Free the memory associated with a deleted task's TCB.  Exactly which
 * allocations are freed (stack, TCB, TLS block) depends on how the task was
 * created, which - when both static and dynamic creation are possible - is
 * recorded in the TCB's ucStaticallyAllocated member. */
    static void prvDeleteTCB( TCB_t * pxTCB )
    {
        /* This call is required specifically for the TriCore port.  It must be
         * above the vPortFree() calls.  The call is also used by ports/demos that
         * want to allocate and clean RAM statically. */
        portCLEAN_UP_TCB( pxTCB );

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Free up the memory allocated for the task's TLS Block. */
            configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock );
        }
        #endif

        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
        {
            /* The task can only have been allocated dynamically - free both
             * the stack and TCB. */
            vPortFreeStack( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        {
            /* The task could have been allocated statically or dynamically, so
             * check what was statically allocated before trying to free the
             * memory. */
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and TCB were allocated dynamically, so both
                 * must be freed. */
                vPortFreeStack( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was statically allocated, so the TCB is the
                 * only memory that must be freed. */
                vPortFree( pxTCB );
            }
            else
            {
                /* Neither the stack nor the TCB were allocated dynamically, so
                 * nothing needs to be freed. */
                configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }

#endif /* INCLUDE_vTaskDelete */
6482 /*-----------------------------------------------------------*/
6483
6484 static void prvResetNextTaskUnblockTime( void )
6485 {
6486     if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
6487     {
6488         /* The new current delayed list is empty.  Set xNextTaskUnblockTime to
6489          * the maximum possible value so it is  extremely unlikely that the
6490          * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
6491          * there is an item in the delayed list. */
6492         xNextTaskUnblockTime = portMAX_DELAY;
6493     }
6494     else
6495     {
6496         /* The new current delayed list is not empty, get the value of
6497          * the item at the head of the delayed list.  This is the time at
6498          * which the task at the head of the delayed list should be removed
6499          * from the Blocked state. */
6500         xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
6501     }
6502 }
6503 /*-----------------------------------------------------------*/
6504
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 )

    #if ( configNUMBER_OF_CORES == 1 )

        /* Return the handle of the task that is currently running. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;

            traceENTER_xTaskGetCurrentTaskHandle();

            /* A critical section is not required as this is not called from
             * an interrupt and the current TCB will always be the same for any
             * individual execution thread. */
            xReturn = pxCurrentTCB;

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */

        /* SMP version: return the handle of the task currently running on
         * the calling core.  Interrupts are masked so the core ID and the
         * read of pxCurrentTCBs[] for that core are consistent. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;
            UBaseType_t uxSavedInterruptStatus;

            traceENTER_xTaskGetCurrentTaskHandle();

            uxSavedInterruptStatus = portSET_INTERRUPT_MASK();
            {
                xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
            }
            portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus );

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }

        /* Return the handle of the task currently running on the core with
         * ID xCoreID, or NULL if xCoreID is not a valid core ID. */
        TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID )
        {
            TaskHandle_t xReturn = NULL;

            traceENTER_xTaskGetCurrentTaskHandleForCore( xCoreID );

            if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
            {
                xReturn = pxCurrentTCBs[ xCoreID ];
            }

            traceRETURN_xTaskGetCurrentTaskHandleForCore( xReturn );

            return xReturn;
        }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) */
6560 /*-----------------------------------------------------------*/
6561
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

/* Report the scheduler's current state: not yet started, running, or
 * suspended by a call to vTaskSuspendAll(). */
    BaseType_t xTaskGetSchedulerState( void )
    {
        BaseType_t xSchedulerState;

        traceENTER_xTaskGetSchedulerState();

        if( xSchedulerRunning != pdFALSE )
        {
            /* On SMP builds uxSchedulerSuspended must be sampled from inside
             * a critical section to obtain a consistent value. */
            #if ( configNUMBER_OF_CORES > 1 )
                taskENTER_CRITICAL();
            #endif
            {
                if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
                {
                    xSchedulerState = taskSCHEDULER_SUSPENDED;
                }
                else
                {
                    xSchedulerState = taskSCHEDULER_RUNNING;
                }
            }
            #if ( configNUMBER_OF_CORES > 1 )
                taskEXIT_CRITICAL();
            #endif
        }
        else
        {
            xSchedulerState = taskSCHEDULER_NOT_STARTED;
        }

        traceRETURN_xTaskGetSchedulerState( xSchedulerState );

        return xSchedulerState;
    }

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
6600 /*-----------------------------------------------------------*/
6601
#if ( configUSE_MUTEXES == 1 )

/* Called when a task attempts to take a mutex that is already held:  if the
 * mutex holder has a lower priority than the calling task, the holder
 * temporarily inherits the calling task's priority.  Returns pdTRUE if
 * priority inheritance is applied, or has already been applied via another
 * mutex, otherwise pdFALSE. */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxMutexHolderTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityInherit( pxMutexHolder );

        /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority
         * inheritance is not applied in this scenario. */
        if( pxMutexHolder != NULL )
        {
            /* If the holder of the mutex has a priority below the priority of
             * the task attempting to obtain the mutex then it will temporarily
             * inherit the priority of the task attempting to obtain the mutex. */
            if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
            {
                /* Adjust the mutex holder state to account for its new
                 * priority.  Only reset the event list item value if the value is
                 * not being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                {
                    listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task being modified is in the ready state it will need
                 * to be moved into a new list. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Inherit the priority before being moved into the new list. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                    prvAddTaskToReadyList( pxMutexHolderTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is raised. Yield for this task
                         * if it is not running. */
                        if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE )
                        {
                            prvYieldForTask( pxMutexHolderTCB );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                }
                else
                {
                    /* Just inherit the priority. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                }

                traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

                /* Inheritance occurred. */
                xReturn = pdTRUE;
            }
            else
            {
                if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
                {
                    /* The base priority of the mutex holder is lower than the
                     * priority of the task attempting to take the mutex, but the
                     * current priority of the mutex holder is not lower than the
                     * priority of the task attempting to take the mutex.
                     * Therefore the mutex holder must have already inherited a
                     * priority, but inheritance would have occurred if that had
                     * not been the case. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityInherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
6703 /*-----------------------------------------------------------*/
6704
#if ( configUSE_MUTEXES == 1 )

/* Called when a mutex is given back by its holder:  decrement the holder's
 * mutex count and, if no other mutexes remain held, restore the holder's
 * base priority (undoing any priority inheritance).  Returns pdTRUE when
 * the caller should perform a context switch. */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityDisinherit( pxMutexHolder );

        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
             * If the mutex is held by a task then it cannot be given from an
             * interrupt, and if a mutex is given by the holding task then it must
             * be the running state task. */
            configASSERT( pxTCB == pxCurrentTCB );
            configASSERT( pxTCB->uxMutexesHeld );
            ( pxTCB->uxMutexesHeld )--;

            /* Has the holder of the mutex inherited the priority of another
             * task? */
            if( pxTCB->uxPriority != pxTCB->uxBasePriority )
            {
                /* Only disinherit if no other mutexes are held. */
                if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                {
                    /* A task can only have an inherited priority if it holds
                     * the mutex.  If the mutex is held by a task then it cannot be
                     * given from an interrupt, and if a mutex is given by the
                     * holding task then it must be the running state task.  Remove
                     * the holding task from the ready list. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Disinherit the priority before adding the task into the
                     * new ready list. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    pxTCB->uxPriority = pxTCB->uxBasePriority;

                    /* Reset the event list item value.  It cannot be in use for
                     * any other purpose if this task is running, and it must be
                     * running to give back the mutex. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
                    prvAddTaskToReadyList( pxTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is dropped. Yield the core on
                         * which the task is running. */
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                        {
                            prvYieldCore( pxTCB->xTaskRunState );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

                    /* Return true to indicate that a context switch is required.
                     * This is only actually required in the corner case whereby
                     * multiple mutexes were held and the mutexes were given back
                     * in an order different to that in which they were taken.
                     * If a context switch did not occur when the first mutex was
                     * returned, even if a task was waiting on it, then a context
                     * switch should occur when the last mutex is returned whether
                     * a task is waiting on it or not. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityDisinherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
6797 /*-----------------------------------------------------------*/
6798
#if ( configUSE_MUTEXES == 1 )

/* Called when a task times out while waiting for a mutex:  if the mutex
 * holder inherited the waiting task's priority, lower the holder's priority
 * to the greater of its base priority and the priority of the highest
 * priority task still waiting for the mutex. */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                              UBaseType_t uxHighestPriorityWaitingTask )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
        const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );

        if( pxMutexHolder != NULL )
        {
            /* If pxMutexHolder is not NULL then the holder must hold at least
             * one mutex. */
            configASSERT( pxTCB->uxMutexesHeld );

            /* Determine the priority to which the priority of the task that
             * holds the mutex should be set.  This will be the greater of the
             * holding task's base priority and the priority of the highest
             * priority task that is waiting to obtain the mutex. */
            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
            {
                uxPriorityToUse = uxHighestPriorityWaitingTask;
            }
            else
            {
                uxPriorityToUse = pxTCB->uxBasePriority;
            }

            /* Does the priority need to change? */
            if( pxTCB->uxPriority != uxPriorityToUse )
            {
                /* Only disinherit if no other mutexes are held.  This is a
                 * simplification in the priority inheritance implementation.  If
                 * the task that holds the mutex is also holding other mutexes then
                 * the other mutexes may have caused the priority inheritance. */
                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                {
                    /* If a task has timed out because it already holds the
                     * mutex it was trying to obtain then it cannot have inherited
                     * its own priority. */
                    configASSERT( pxTCB != pxCurrentTCB );

                    /* Disinherit the priority, remembering the previous
                     * priority to facilitate determining the subject task's
                     * state. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
                    pxTCB->uxPriority = uxPriorityToUse;

                    /* Only reset the event list item value if the value is not
                     * being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the running task is not the task that holds the mutex
                     * then the task that holds the mutex could be in either the
                     * Ready, Blocked or Suspended states.  Only remove the task
                     * from its current state list if it is in the Ready state as
                     * the task's priority is going to change and there is one
                     * Ready list per priority. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                             * there is no need to check again and the port level
                             * reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        prvAddTaskToReadyList( pxTCB );
                        #if ( configNUMBER_OF_CORES > 1 )
                        {
                            /* The priority of the task is dropped. Yield the core on
                             * which the task is running. */
                            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                            {
                                prvYieldCore( pxTCB->xTaskRunState );
                            }
                        }
                        #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskPriorityDisinheritAfterTimeout();
    }

#endif /* configUSE_MUTEXES */
6917 /*-----------------------------------------------------------*/
6918
#if ( configNUMBER_OF_CORES > 1 )

/* If not in a critical section then yield immediately.
 * Otherwise set xYieldPendings to true to wait to
 * yield until exiting the critical section.
 */
    void vTaskYieldWithinAPI( void )
    {
        traceENTER_vTaskYieldWithinAPI();

        if( portGET_CRITICAL_NESTING_COUNT() != 0U )
        {
            /* Inside a critical section - record the yield as pending so it
             * is performed when the critical section is exited. */
            xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
        }
        else
        {
            portYIELD();
        }

        traceRETURN_vTaskYieldWithinAPI();
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6941
6942 /*-----------------------------------------------------------*/
6943
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

/* Single-core critical section entry for ports that keep the critical
 * nesting count in the TCB:  disable interrupts and increment the nesting
 * count.  Interrupts remain disabled until the matching vTaskExitCritical()
 * call decrements the count back to zero.  Note the count is only
 * incremented once the scheduler is running, because pxCurrentTCB is
 * needed to hold it. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            ( pxCurrentTCB->uxCriticalNesting )++;

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( pxCurrentTCB->uxCriticalNesting == 1U )
            {
                portASSERT_IF_IN_ISR();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
6976 /*-----------------------------------------------------------*/
6977
#if ( configNUMBER_OF_CORES > 1 )

/* SMP critical section entry:  disable interrupts on this core and, on the
 * outermost (first) entry, also take both the task and ISR spinlocks so no
 * other core can enter a kernel critical section.  Nesting is tracked with
 * the port's critical nesting count. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Only the outermost entry takes the locks; nested entries just
             * increment the count. */
            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_TASK_LOCK();
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( portGET_CRITICAL_NESTING_COUNT() == 1U )
            {
                portASSERT_IF_IN_ISR();

                if( uxSchedulerSuspended == 0U )
                {
                    /* The only time there would be a problem is if this is called
                     * before a context switch and vTaskExitCritical() is called
                     * after pxCurrentTCB changes. Therefore this should not be
                     * used within vTaskSwitchContext(). */
                    prvCheckForRunStateChange();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7025
7026 /*-----------------------------------------------------------*/
7027
#if ( configNUMBER_OF_CORES > 1 )

/* SMP interrupt-safe critical section entry:  mask interrupts and, on the
 * outermost entry, take the ISR spinlock (the task lock is not taken from
 * an ISR).  Returns the previous interrupt mask, which must be passed to
 * the matching vTaskExitCriticalFromISR() call. */
    UBaseType_t vTaskEnterCriticalFromISR( void )
    {
        UBaseType_t uxSavedInterruptStatus = 0;

        traceENTER_vTaskEnterCriticalFromISR();

        if( xSchedulerRunning != pdFALSE )
        {
            uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();

            /* Only the outermost entry takes the ISR lock. */
            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCriticalFromISR( uxSavedInterruptStatus );

        return uxSavedInterruptStatus;
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7058 /*-----------------------------------------------------------*/
7059
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

/* Single-core critical section exit for ports that keep the critical
 * nesting count in the TCB:  decrement the nesting count and re-enable
 * interrupts only when the outermost critical section is exited (count
 * reaches zero). */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If pxCurrentTCB->uxCriticalNesting is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( pxCurrentTCB->uxCriticalNesting > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( pxCurrentTCB->uxCriticalNesting > 0U )
            {
                ( pxCurrentTCB->uxCriticalNesting )--;

                if( pxCurrentTCB->uxCriticalNesting == 0U )
                {
                    portENABLE_INTERRUPTS();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
7103 /*-----------------------------------------------------------*/
7104
#if ( configNUMBER_OF_CORES > 1 )

/* SMP critical section exit:  decrement the critical nesting count and, on
 * the outermost exit, release both spinlocks, re-enable interrupts, and
 * perform any yield that was deferred (by vTaskYieldWithinAPI()) while the
 * critical section was held. */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    BaseType_t xYieldCurrentTask;

                    /* Get the xYieldPending stats inside the critical section. */
                    xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ];

                    /* Release the locks in the reverse of the order they were
                     * taken in vTaskEnterCritical(). */
                    portRELEASE_ISR_LOCK();
                    portRELEASE_TASK_LOCK();
                    portENABLE_INTERRUPTS();

                    /* When a task yields in a critical section it just sets
                     * xYieldPending to true. So now that we have exited the
                     * critical section check if xYieldPending is true, and
                     * if so yield. */
                    if( xYieldCurrentTask != pdFALSE )
                    {
                        portYIELD();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7164 /*-----------------------------------------------------------*/
7165
#if ( configNUMBER_OF_CORES > 1 )

    /* ISR-context counterpart of vTaskExitCritical() for SMP builds.
     * Decrements this core's critical nesting count and, when it returns to
     * zero, releases the ISR lock and restores the interrupt mask that was
     * captured by the matching enter call.
     *
     * uxSavedInterruptStatus - the value returned by the corresponding
     * vTaskEnterCriticalFromISR()/port SET_INTERRUPT_MASK_FROM_ISR call. */
    void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
    {
        traceENTER_vTaskExitCriticalFromISR( uxSavedInterruptStatus );

        /* NOTE(review): no action before the scheduler starts - presumably
         * the enter call is also a no-op then; confirm against the matching
         * enter function. */
        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                /* Only the outermost exit releases the ISR lock and restores
                 * the previous interrupt mask. */
                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    portRELEASE_ISR_LOCK();
                    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCriticalFromISR();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7206 /*-----------------------------------------------------------*/
7207
#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )

    /* Copy pcTaskName into pcBuffer, padding with spaces up to
     * ( configMAX_TASK_NAME_LEN - 1 ) characters so that names line up in
     * tabular output, then NUL-terminate.  Returns a pointer to the new
     * terminating NUL so the caller can continue writing from there. */
    static char * prvWriteNameToBuffer( char * pcBuffer,
                                        const char * pcTaskName )
    {
        size_t uxIndex = 0;

        /* Copy the name character by character (terminator excluded). */
        while( pcTaskName[ uxIndex ] != ( char ) 0x00 )
        {
            pcBuffer[ uxIndex ] = pcTaskName[ uxIndex ];
            uxIndex++;
        }

        /* Pad out to the column width.  If the name already fills the
         * column no padding is written. */
        while( uxIndex < ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ) )
        {
            pcBuffer[ uxIndex ] = ' ';
            uxIndex++;
        }

        /* Terminate. */
        pcBuffer[ uxIndex ] = ( char ) 0x00;

        /* Return the new end of string. */
        return &( pcBuffer[ uxIndex ] );
    }

#endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
7233 /*-----------------------------------------------------------*/
7234
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

    /* Write a human-readable table of every task's name, state character,
     * priority, stack high water mark and task number (plus core affinity
     * mask on SMP builds) into pcWriteBuffer, truncating when
     * uxBufferLength would be exceeded.
     * NOTE(review): assumes uxBufferLength >= 1 - the buffer is
     * unconditionally NUL-terminated below; confirm callers guarantee this. */
    void vTaskListTasks( char * pcWriteBuffer,
                         size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;   /* Characters written so far, excluding the NUL. */
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        char cStatus;

        traceENTER_vTaskListTasks( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskListTasks() calls uxTaskGetSystemState(), then formats part of the
         * uxTaskGetSystemState() output into a human readable table that
         * displays task: names, states, priority, stack usage and task number.
         * Stack usage specified as the number of unused StackType_t words stack can hold
         * on top of stack - not the number of bytes.
         *
         * vTaskListTasks() has a dependency on the snprintf() C library function that
         * might bloat the code size, use a lot of stack, and provide different
         * results on different platforms.  An alternative, tiny, third party,
         * and limited functionality implementation of snprintf() is provided in
         * many of the FreeRTOS/Demo sub-directories in a file called
         * printf-stdarg.c (note printf-stdarg.c does not provide a full
         * snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskListTasks().
         */


        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  if
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

            /* Create a human readable table from the binary data. */
            for( x = 0; x < uxArraySize; x++ )
            {
                /* Map the task state enum onto the single display character. */
                switch( pxTaskStatusArray[ x ].eCurrentState )
                {
                    case eRunning:
                        cStatus = tskRUNNING_CHAR;
                        break;

                    case eReady:
                        cStatus = tskREADY_CHAR;
                        break;

                    case eBlocked:
                        cStatus = tskBLOCKED_CHAR;
                        break;

                    case eSuspended:
                        cStatus = tskSUSPENDED_CHAR;
                        break;

                    case eDeleted:
                        cStatus = tskDELETED_CHAR;
                        break;

                    case eInvalid: /* Fall through. */
                    default:       /* Should not get here, but it is included
                                    * to prevent static checking errors. */
                        cStatus = ( char ) 0x00;
                        break;
                }

                /* Is there enough space in the buffer to hold task name? */
                if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                {
                    /* Write the task name to the string, padding with spaces so it
                     * can be printed in tabular form more easily. */
                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
                    /* Do not count the terminating null character. */
                    uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                    /* Is there space left in the buffer? -1 is done because snprintf
                     * writes a terminating null character. So we are essentially
                     * checking if the buffer has space to write at least one non-null
                     * character. */
                    if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                    {
                        /* Write the rest of the string. */
                        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\t0x%x\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCoreAffinityMask );
                        #else /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
                        #endif /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

                        /* Clamp the snprintf return value (which may be negative
                         * on error, or larger than the space available on
                         * truncation) to the number of characters actually
                         * stored in the buffer. */
                        uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );

                        uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                        pcWriteBuffer += uxCharsWrittenBySnprintf;
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }
                }
                else
                {
                    xOutputBufferFull = pdTRUE;
                }

                /* Stop formatting as soon as the buffer cannot take a full row. */
                if( xOutputBufferFull == pdTRUE )
                {
                    break;
                }
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskListTasks();
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
7403 /*----------------------------------------------------------*/
7404
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )

    /* Write a human-readable table of each task's name, total run time
     * counter and percentage of total run time into pcWriteBuffer,
     * truncating when uxBufferLength would be exceeded.
     * NOTE(review): assumes uxBufferLength >= 1 - the buffer is
     * unconditionally NUL-terminated below; confirm callers guarantee this. */
    void vTaskGetRunTimeStatistics( char * pcWriteBuffer,
                                    size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;   /* Characters written so far, excluding the NUL. */
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        configRUN_TIME_COUNTER_TYPE ulTotalTime = 0;
        configRUN_TIME_COUNTER_TYPE ulStatsAsPercentage;

        traceENTER_vTaskGetRunTimeStatistics( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskGetRunTimeStatistics() calls uxTaskGetSystemState(), then formats part
         * of the uxTaskGetSystemState() output into a human readable table that
         * displays the amount of time each task has spent in the Running state
         * in both absolute and percentage terms.
         *
         * vTaskGetRunTimeStatistics() has a dependency on the snprintf() C library
         * function that might bloat the code size, use a lot of stack, and
         * provide different results on different platforms.  An alternative,
         * tiny, third party, and limited functionality implementation of
         * snprintf() is provided in many of the FreeRTOS/Demo sub-directories in
         * a file called printf-stdarg.c (note printf-stdarg.c does not provide
         * a full snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskGetRunTimeStatistics().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  If
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

            /* For percentage calculations. */
            ulTotalTime /= ( ( configRUN_TIME_COUNTER_TYPE ) 100UL );

            /* Avoid divide by zero errors. */
            if( ulTotalTime > 0UL )
            {
                /* Create a human readable table from the binary data. */
                for( x = 0; x < uxArraySize; x++ )
                {
                    /* What percentage of the total run time has the task used?
                     * This will always be rounded down to the nearest integer.
                     * ulTotalRunTime has already been divided by 100. */
                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                    /* Is there enough space in the buffer to hold task name? */
                    if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                    {
                        /* Write the task name to the string, padding with
                         * spaces so it can be printed in tabular form more
                         * easily. */
                        pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
                        /* Do not count the terminating null character. */
                        uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                        /* Is there space left in the buffer? -1 is done because snprintf
                         * writes a terminating null character. So we are essentially
                         * checking if the buffer has space to write at least one non-null
                         * character. */
                        if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                        {
                            if( ulStatsAsPercentage > 0UL )
                            {
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t%lu%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ulStatsAsPercentage );
                                }
                                #else /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t%u%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ( unsigned int ) ulStatsAsPercentage );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }
                            else
                            {
                                /* If the percentage is zero here then the task has
                                 * consumed less than 1% of the total run time. */
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t<1%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #else
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t<1%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }

                            /* Clamp the snprintf return value (which may be
                             * negative on error, or larger than the space
                             * available on truncation) to the number of
                             * characters actually stored in the buffer. */
                            uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );
                            uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                            pcWriteBuffer += uxCharsWrittenBySnprintf;
                        }
                        else
                        {
                            xOutputBufferFull = pdTRUE;
                        }
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }

                    /* Stop formatting as soon as the buffer cannot take a full row. */
                    if( xOutputBufferFull == pdTRUE )
                    {
                        break;
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskGetRunTimeStatistics();
    }

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */
7591 /*-----------------------------------------------------------*/
7592
7593 TickType_t uxTaskResetEventItemValue( void )
7594 {
7595     TickType_t uxReturn;
7596
7597     traceENTER_uxTaskResetEventItemValue();
7598
7599     uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
7600
7601     /* Reset the event list item to its normal value - so it can be used with
7602      * queues and semaphores. */
7603     listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) );
7604
7605     traceRETURN_uxTaskResetEventItemValue( uxReturn );
7606
7607     return uxReturn;
7608 }
7609 /*-----------------------------------------------------------*/
7610
#if ( configUSE_MUTEXES == 1 )

    /* Record that the running task has taken one more mutex and return its
     * handle so the mutex can store the holder.  Returns NULL when called
     * before any task exists (e.g. a mutex created before the scheduler
     * has a current task). */
    TaskHandle_t pvTaskIncrementMutexHeldCount( void )
    {
        TCB_t * pxHolder;

        traceENTER_pvTaskIncrementMutexHeldCount();

        pxHolder = pxCurrentTCB;

        /* If xSemaphoreCreateMutex() is called before any tasks have been
         * created then pxCurrentTCB will be NULL - in that case there is no
         * held-mutex count to update. */
        if( pxHolder != NULL )
        {
            pxHolder->uxMutexesHeld += 1U;
        }

        traceRETURN_pvTaskIncrementMutexHeldCount( pxHolder );

        return pxHolder;
    }

#endif /* configUSE_MUTEXES */
7634 /*-----------------------------------------------------------*/
7635
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Counting-semaphore style notification take.  Optionally blocks for up
     * to xTicksToWait ticks until the notification value at index
     * uxIndexToWaitOn becomes non-zero, then either clears the value
     * (xClearCountOnExit != pdFALSE) or decrements it by one.  Returns the
     * notification value as it was before being cleared/decremented - zero
     * therefore means the wait timed out. */
    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
                                      BaseType_t xClearCountOnExit,
                                      TickType_t xTicksToWait )
    {
        uint32_t ulReturn;
        BaseType_t xAlreadyYielded;

        traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();

        /* Only block if the notification count is not already non-zero. */
        if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
        {
            /* Mark this task as waiting for a notification. */
            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );

                /* We MUST suspend the scheduler before exiting the critical
                 * section (i.e. before enabling interrupts).
                 *
                 * If we do not do so, a notification sent from an ISR, which
                 * happens after exiting the critical section and before
                 * suspending the scheduler, will get lost. The sequence of
                 * events will be:
                 * 1. Exit critical section.
                 * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the Ready list.
                 * 3. Suspend scheduler.
                 * 4. prvAddCurrentTaskToDelayedList moves the task to the
                 *    delayed or suspended list.
                 * 5. Resume scheduler does not touch the task (because it is
                 *    not on the pendingReady list), effectively losing the
                 *    notification from the ISR.
                 *
                 * The same does not happen when we suspend the scheduler before
                 * exiting the critical section. The sequence of events in this
                 * case will be:
                 * 1. Suspend scheduler.
                 * 2. Exit critical section.
                 * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the pendingReady list as the scheduler is
                 *    suspended.
                 * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
                 *    suspended list. Note that this operation does not nullify
                 *    the add to pendingReady list done in the above step because
                 *    a different list item, namely xEventListItem, is used for
                 *    adding the task to the pendingReady list. In other words,
                 *    the task still remains on the pendingReady list.
                 * 5. Resume scheduler moves the task from pendingReady list to
                 *    the Ready list.
                 */
                vTaskSuspendAll();
                {
                    taskEXIT_CRITICAL();

                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                }
                xAlreadyYielded = xTaskResumeAll();

                /* If xTaskResumeAll() did not already perform the context
                 * switch, yield here so the blocked state takes effect. */
                if( xAlreadyYielded == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Zero block time - return immediately without waiting. */
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            /* A notification is already pending - no need to block. */
            taskEXIT_CRITICAL();
        }

        /* Re-enter a critical section to read and consume the notification
         * value atomically - we may have just been unblocked by a notify. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );
            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];

            if( ulReturn != 0UL )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ( uint32_t ) 0UL;
                }
                else
                {
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyTake( ulReturn );

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
7753 /*-----------------------------------------------------------*/
7754
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Wait for a notification to become pending at array index
     * uxIndexToWaitOn of the calling task, blocking for up to xTicksToWait
     * ticks.  Bits in ulBitsToClearOnEntry are cleared from the notification
     * value before blocking; bits in ulBitsToClearOnExit are cleared after a
     * notification has been received.  If pulNotificationValue is not NULL
     * the notification value (as read before the exit-clear is applied) is
     * written through it.  Returns pdTRUE if a notification was received or
     * was already pending, pdFALSE if the wait timed out. */
    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
                                       uint32_t ulBitsToClearOnEntry,
                                       uint32_t ulBitsToClearOnExit,
                                       uint32_t * pulNotificationValue,
                                       TickType_t xTicksToWait )
    {
        BaseType_t xReturn, xAlreadyYielded;

        traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();

        /* Only block if a notification is not already pending. */
        if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
        {
            /* Clear bits in the task's notification value as bits may get
             * set  by the notifying task or interrupt.  This can be used to
             * clear the value to zero. */
            pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;

            /* Mark this task as waiting for a notification. */
            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );

                /* We MUST suspend the scheduler before exiting the critical
                 * section (i.e. before enabling interrupts).
                 *
                 * If we do not do so, a notification sent from an ISR, which
                 * happens after exiting the critical section and before
                 * suspending the scheduler, will get lost. The sequence of
                 * events will be:
                 * 1. Exit critical section.
                 * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the Ready list.
                 * 3. Suspend scheduler.
                 * 4. prvAddCurrentTaskToDelayedList moves the task to the
                 *    delayed or suspended list.
                 * 5. Resume scheduler does not touch the task (because it is
                 *    not on the pendingReady list), effectively losing the
                 *    notification from the ISR.
                 *
                 * The same does not happen when we suspend the scheduler before
                 * exiting the critical section. The sequence of events in this
                 * case will be:
                 * 1. Suspend scheduler.
                 * 2. Exit critical section.
                 * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the pendingReady list as the scheduler is
                 *    suspended.
                 * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
                 *    suspended list. Note that this operation does not nullify
                 *    the add to pendingReady list done in the above step because
                 *    a different list item, namely xEventListItem, is used for
                 *    adding the task to the pendingReady list. In other words,
                 *    the task still remains on the pendingReady list.
                 * 5. Resume scheduler moves the task from pendingReady list to
                 *    the Ready list.
                 */
                vTaskSuspendAll();
                {
                    taskEXIT_CRITICAL();

                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                }
                xAlreadyYielded = xTaskResumeAll();

                /* xTaskResumeAll() reports whether it already performed a
                 * yield while resuming the scheduler.  If it did not, yield
                 * here so this (now blocked) task stops executing. */
                if( xAlreadyYielded == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* A zero block time means return immediately without
                 * waiting. */
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            /* A notification is already pending - no need to block. */
            taskEXIT_CRITICAL();
        }

        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );

            if( pulNotificationValue != NULL )
            {
                /* Output the current notification value, which may or may not
                 * have changed. */
                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
            }

            /* If ucNotifyValue is set then either the task never entered the
             * blocked state (because a notification was already pending) or the
             * task unblocked because of a notification.  Otherwise the task
             * unblocked because of a timeout. */
            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
            {
                /* A notification was not received. */
                xReturn = pdFALSE;
            }
            else
            {
                /* A notification was already pending or a notification was
                 * received while the task was waiting. */
                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit;
                xReturn = pdTRUE;
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyWait( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
7885 /*-----------------------------------------------------------*/
7886
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Send a notification to the task referenced by xTaskToNotify, at array
     * index uxIndexToNotify, updating the task's notification value according
     * to eAction (set bits, increment, overwrite, conditionally overwrite, or
     * no action).  If pulPreviousNotificationValue is not NULL, the value as
     * it was before the update is returned through it.  Returns pdPASS unless
     * eAction is eSetValueWithoutOverwrite and a notification was already
     * pending, in which case pdFAIL is returned.  If the target task was
     * blocked waiting on this notification index it is moved to the Ready
     * list and a yield is requested if appropriate. */
    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdPASS;
        uint8_t ucOriginalNotifyState;

        traceENTER_xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );

        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
        configASSERT( xTaskToNotify );
        pxTCB = xTaskToNotify;

        taskENTER_CRITICAL();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Capture the value before any eAction update is applied. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember the prior state - it determines both whether
             * eSetValueWithoutOverwrite may write, and whether the target
             * task needs unblocking below. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );

                    break;
            }

            traceTASK_NOTIFY( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );

                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                #if ( configUSE_TICKLESS_IDLE != 0 )
                {
                    /* If a task is blocked waiting for a notification then
                     * xNextTaskUnblockTime might be set to the blocked task's time
                     * out time.  If the task is unblocked for a reason other than
                     * a timeout xNextTaskUnblockTime is normally left unchanged,
                     * because it will automatically get reset to a new value when
                     * the tick count equals xNextTaskUnblockTime.  However if
                     * tickless idling is used it might be more important to enter
                     * sleep mode at the earliest possible time - so reset
                     * xNextTaskUnblockTime here to ensure it is updated at the
                     * earliest possible time. */
                    prvResetNextTaskUnblockTime();
                }
                #endif

                /* Check if the notified task has a priority above the currently
                 * executing task. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotify( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8005 /*-----------------------------------------------------------*/
8006
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Interrupt-safe version of xTaskGenericNotify().  Sends a notification
     * to xTaskToNotify at array index uxIndexToNotify, applying eAction to
     * the notification value.  Returns pdPASS unless eAction is
     * eSetValueWithoutOverwrite and a notification was already pending
     * (pdFAIL).  If the notified task was blocked waiting on this index it
     * is made ready (or placed on the pending-ready list while the scheduler
     * is suspended), and *pxHigherPriorityTaskWoken is set to pdTRUE when a
     * context switch should be requested before the interrupt exits. */
    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                          UBaseType_t uxIndexToNotify,
                                          uint32_t ulValue,
                                          eNotifyAction eAction,
                                          uint32_t * pulPreviousNotificationValue,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskGenericNotifyFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Capture the value before any eAction update is applied. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter to an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskGenericNotifyFromISR( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8168 /*-----------------------------------------------------------*/
8169
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Interrupt-safe "give": increments the notification value at array
     * index uxIndexToNotify of xTaskToNotify, which makes the notification
     * behave like giving a counting semaphore.  If the notified task was
     * blocked waiting on this index it is made ready (or placed on the
     * pending-ready list while the scheduler is suspended), and
     * *pxHigherPriorityTaskWoken is set to pdTRUE when a context switch
     * should be requested before the interrupt exits. */
    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                        UBaseType_t uxIndexToNotify,
                                        BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_vTaskGenericNotifyGiveFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Remember the prior state - it determines whether the target
             * task needs unblocking below. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            /* 'Giving' is equivalent to incrementing a count in a counting
             * semaphore. */
            ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

            traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter in an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_vTaskGenericNotifyGiveFromISR();
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8281 /*-----------------------------------------------------------*/
8282
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Clear a pending notification at array index uxIndexToClear of the
     * task referenced by xTask (NULL means the calling task).  Returns
     * pdPASS if a pending notification was cleared, pdFAIL if no
     * notification was pending at that index. */
    BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
                                             UBaseType_t uxIndexToClear )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdFAIL;

        traceENTER_xTaskGenericNotifyStateClear( xTask, uxIndexToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* A NULL handle means the calling task's own notification state is
         * the one being cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            /* Only a notification that is currently pending can be
             * cleared. */
            if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
            {
                pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
                xReturn = pdPASS;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyStateClear( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8319 /*-----------------------------------------------------------*/
8320
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Atomically read the notification value at array index uxIndexToClear
     * of the task referenced by xTask (NULL means the calling task), clear
     * the bits given in ulBitsToClear, and return the value as it was
     * before the clear. */
    uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
                                            UBaseType_t uxIndexToClear,
                                            uint32_t ulBitsToClear )
    {
        TCB_t * pxTCB;
        uint32_t ulOriginalValue;

        traceENTER_ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* A NULL handle means the calling task's own notification value is
         * the one being operated on. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            /* Snapshot the value first so the pre-clear value can be
             * returned, then remove the requested bits. */
            ulOriginalValue = pxTCB->ulNotifiedValue[ uxIndexToClear ];
            pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyValueClear( ulOriginalValue );

        return ulOriginalValue;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8353 /*-----------------------------------------------------------*/
8354
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    /* Return the accumulated run-time counter of the task referenced by
     * xTask (NULL means the calling task). */
    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        configRUN_TIME_COUNTER_TYPE ulRunTime;

        traceENTER_ulTaskGetRunTimeCounter( xTask );

        /* A NULL handle resolves to the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );
        ulRunTime = pxTCB->ulRunTimeCounter;

        traceRETURN_ulTaskGetRunTimeCounter( ulRunTime );

        return ulRunTime;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
8371 /*-----------------------------------------------------------*/
8372
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    /* Return the percentage of the total run time consumed by the task
     * referenced by xTask (NULL means the calling task).  Returns zero if
     * no whole percent of run time has elapsed yet. */
    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;

        traceENTER_ulTaskGetRunTimePercent( xTask );

        ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();

        /* One percent of the total run time becomes the divisor. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        if( ulTotalTime == ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            /* Too early in the run to divide - avoid a divide by zero. */
            ulReturn = 0;
        }
        else
        {
            pxTCB = prvGetTCBFromHandle( xTask );
            ulReturn = pxTCB->ulRunTimeCounter / ulTotalTime;
        }

        traceRETURN_ulTaskGetRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
8404 /*-----------------------------------------------------------*/
8405
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    /* Return the accumulated run-time counters of the idle tasks, summed
     * across every core. */
    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
    {
        configRUN_TIME_COUNTER_TYPE ulIdleTime = 0;
        BaseType_t xCoreID;

        traceENTER_ulTaskGetIdleRunTimeCounter();

        /* Each core has its own idle task; accumulate all of them. */
        for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
        {
            ulIdleTime += xIdleTaskHandles[ xCoreID ]->ulRunTimeCounter;
        }

        traceRETURN_ulTaskGetIdleRunTimeCounter( ulIdleTime );

        return ulIdleTime;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
8426 /*-----------------------------------------------------------*/
8427
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    /* Return the percentage of the total run time (across all cores) spent
     * in the idle tasks.  Returns zero if no whole percent of run time has
     * elapsed yet. */
    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
    {
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
        BaseType_t xCoreID;

        traceENTER_ulTaskGetIdleRunTimePercent();

        /* The total available run time is the elapsed time multiplied by
         * the number of cores. */
        ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES;

        /* One percent of the total run time becomes the divisor. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        if( ulTotalTime == ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            /* Too early in the run to divide - avoid a divide by zero. */
            ulReturn = 0;
        }
        else
        {
            /* Each core has its own idle task; accumulate all of them. */
            for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
            {
                ulRunTimeCounter += xIdleTaskHandles[ xCoreID ]->ulRunTimeCounter;
            }

            ulReturn = ulRunTimeCounter / ulTotalTime;
        }

        traceRETURN_ulTaskGetIdleRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
8464 /*-----------------------------------------------------------*/
8465
8466 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
8467                                             const BaseType_t xCanBlockIndefinitely )
8468 {
8469     TickType_t xTimeToWake;
8470     const TickType_t xConstTickCount = xTickCount;
8471     List_t * const pxDelayedList = pxDelayedTaskList;
8472     List_t * const pxOverflowDelayedList = pxOverflowDelayedTaskList;
8473
8474     #if ( INCLUDE_xTaskAbortDelay == 1 )
8475     {
8476         /* About to enter a delayed list, so ensure the ucDelayAborted flag is
8477          * reset to pdFALSE so it can be detected as having been set to pdTRUE
8478          * when the task leaves the Blocked state. */
8479         pxCurrentTCB->ucDelayAborted = pdFALSE;
8480     }
8481     #endif
8482
8483     /* Remove the task from the ready list before adding it to the blocked list
8484      * as the same list item is used for both lists. */
8485     if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
8486     {
8487         /* The current task must be in a ready list, so there is no need to
8488          * check, and the port reset macro can be called directly. */
8489         portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
8490     }
8491     else
8492     {
8493         mtCOVERAGE_TEST_MARKER();
8494     }
8495
8496     #if ( INCLUDE_vTaskSuspend == 1 )
8497     {
8498         if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
8499         {
8500             /* Add the task to the suspended task list instead of a delayed task
8501              * list to ensure it is not woken by a timing event.  It will block
8502              * indefinitely. */
8503             listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
8504         }
8505         else
8506         {
8507             /* Calculate the time at which the task should be woken if the event
8508              * does not occur.  This may overflow but this doesn't matter, the
8509              * kernel will manage it correctly. */
8510             xTimeToWake = xConstTickCount + xTicksToWait;
8511
8512             /* The list item will be inserted in wake time order. */
8513             listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
8514
8515             if( xTimeToWake < xConstTickCount )
8516             {
8517                 /* Wake time has overflowed.  Place this item in the overflow
8518                  * list. */
8519                 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
8520                 vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
8521             }
8522             else
8523             {
8524                 /* The wake time has not overflowed, so the current block list
8525                  * is used. */
8526                 traceMOVED_TASK_TO_DELAYED_LIST();
8527                 vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );
8528
8529                 /* If the task entering the blocked state was placed at the
8530                  * head of the list of blocked tasks then xNextTaskUnblockTime
8531                  * needs to be updated too. */
8532                 if( xTimeToWake < xNextTaskUnblockTime )
8533                 {
8534                     xNextTaskUnblockTime = xTimeToWake;
8535                 }
8536                 else
8537                 {
8538                     mtCOVERAGE_TEST_MARKER();
8539                 }
8540             }
8541         }
8542     }
8543     #else /* INCLUDE_vTaskSuspend */
8544     {
8545         /* Calculate the time at which the task should be woken if the event
8546          * does not occur.  This may overflow but this doesn't matter, the kernel
8547          * will manage it correctly. */
8548         xTimeToWake = xConstTickCount + xTicksToWait;
8549
8550         /* The list item will be inserted in wake time order. */
8551         listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
8552
8553         if( xTimeToWake < xConstTickCount )
8554         {
8555             traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
8556             /* Wake time has overflowed.  Place this item in the overflow list. */
8557             vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
8558         }
8559         else
8560         {
8561             traceMOVED_TASK_TO_DELAYED_LIST();
8562             /* The wake time has not overflowed, so the current block list is used. */
8563             vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );
8564
8565             /* If the task entering the blocked state was placed at the head of the
8566              * list of blocked tasks then xNextTaskUnblockTime needs to be updated
8567              * too. */
8568             if( xTimeToWake < xNextTaskUnblockTime )
8569             {
8570                 xNextTaskUnblockTime = xTimeToWake;
8571             }
8572             else
8573             {
8574                 mtCOVERAGE_TEST_MARKER();
8575             }
8576         }
8577
8578         /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
8579         ( void ) xCanBlockIndefinitely;
8580     }
8581     #endif /* INCLUDE_vTaskSuspend */
8582 }
8583 /*-----------------------------------------------------------*/
8584
8585 #if ( portUSING_MPU_WRAPPERS == 1 )
8586
8587     xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
8588     {
8589         TCB_t * pxTCB;
8590
8591         traceENTER_xTaskGetMPUSettings( xTask );
8592
8593         pxTCB = prvGetTCBFromHandle( xTask );
8594
8595         traceRETURN_xTaskGetMPUSettings( &( pxTCB->xMPUSettings ) );
8596
8597         return &( pxTCB->xMPUSettings );
8598     }
8599
8600 #endif /* portUSING_MPU_WRAPPERS */
8601 /*-----------------------------------------------------------*/
8602
8603 /* Code below here allows additional code to be inserted into this source file,
8604  * especially where access to file scope functions and data is needed (for example
8605  * when performing module tests). */
8606
8607 #ifdef FREERTOS_MODULE_TEST
8608     #include "tasks_test_access_functions.h"
8609 #endif
8610
8611
8612 #if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
8613
8614     #include "freertos_tasks_c_additions.h"
8615
8616     #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        /* Initialisation hook for code injected via freertos_tasks_c_additions.h.
         * The application defines FREERTOS_TASKS_C_ADDITIONS_INIT to whatever
         * set-up its additions require; this wrapper simply expands that macro
         * inside tasks.c so the additions have access to this file's private
         * (file scope) functions and data. */
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
8621     #endif
8622
8623 #endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
8624 /*-----------------------------------------------------------*/
8625
8626 #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
8627
8628 /*
8629  * This is the kernel provided implementation of vApplicationGetIdleTaskMemory()
8630  * to provide the memory that is used by the Idle task. It is used when
8631  * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetIdleTaskMemory by setting
8633  * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
8634  */
8635     void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
8636                                         StackType_t ** ppxIdleTaskStackBuffer,
8637                                         uint32_t * pulIdleTaskStackSize )
8638     {
8639         static StaticTask_t xIdleTaskTCB;
8640         static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
8641
8642         *ppxIdleTaskTCBBuffer = &( xIdleTaskTCB );
8643         *ppxIdleTaskStackBuffer = &( uxIdleTaskStack[ 0 ] );
8644         *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
8645     }
8646
8647     #if ( configNUMBER_OF_CORES > 1 )
8648
8649         void vApplicationGetPassiveIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
8650                                                    StackType_t ** ppxIdleTaskStackBuffer,
8651                                                    uint32_t * pulIdleTaskStackSize,
8652                                                    BaseType_t xPassiveIdleTaskIndex )
8653         {
8654             static StaticTask_t xIdleTaskTCBs[ configNUMBER_OF_CORES - 1 ];
8655             static StackType_t uxIdleTaskStacks[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ];
8656
8657             *ppxIdleTaskTCBBuffer = &( xIdleTaskTCBs[ xPassiveIdleTaskIndex ] );
8658             *ppxIdleTaskStackBuffer = &( uxIdleTaskStacks[ xPassiveIdleTaskIndex ][ 0 ] );
8659             *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
8660         }
8661
8662     #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
8663
8664 #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
8665 /*-----------------------------------------------------------*/
8666
8667 #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
8668
8669 /*
8670  * This is the kernel provided implementation of vApplicationGetTimerTaskMemory()
8671  * to provide the memory that is used by the Timer service task. It is used when
8672  * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetTimerTaskMemory by setting
8674  * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
8675  */
8676     void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
8677                                          StackType_t ** ppxTimerTaskStackBuffer,
8678                                          uint32_t * pulTimerTaskStackSize )
8679     {
8680         static StaticTask_t xTimerTaskTCB;
8681         static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];
8682
8683         *ppxTimerTaskTCBBuffer = &( xTimerTaskTCB );
8684         *ppxTimerTaskStackBuffer = &( uxTimerTaskStack[ 0 ] );
8685         *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
8686     }
8687
8688 #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
8689 /*-----------------------------------------------------------*/