]> begriffs open source - freertos/blob - tasks.c
Remove redundant cancellation point
[freertos] / tasks.c
1 /*
2  * FreeRTOS Kernel <DEVELOPMENT BRANCH>
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28
29 /* Standard includes. */
30 #include <stdlib.h>
31 #include <string.h>
32
33 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
34  * all the API functions to use the MPU wrappers.  That should only be done when
35  * task.h is included from an application file. */
36 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
37
38 /* FreeRTOS includes. */
39 #include "FreeRTOS.h"
40 #include "task.h"
41 #include "timers.h"
42 #include "stack_macros.h"
43
44 /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
45  * for the header files above, but not in this file, in order to generate the
46  * correct privileged Vs unprivileged linkage and placement. */
47 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
48
49 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
50  * functions but without including stdio.h here. */
51 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
52
53 /* At the bottom of this file are two optional functions that can be used
54  * to generate human readable text from the raw data generated by the
55  * uxTaskGetSystemState() function.  Note the formatting functions are provided
56  * for convenience only, and are NOT considered part of the kernel. */
57     #include <stdio.h>
58 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
59
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )
    #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )
#else

    #if ( configNUMBER_OF_CORES == 1 )

/* This macro requests the running task pxTCB to yield. In single core
 * scheduler, a running task always runs on core 0 and portYIELD_WITHIN_API()
 * can be used to request the task running on core 0 to yield. Therefore, pxTCB
 * is not used in this macro. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                         \
        ( void ) ( pxTCB );                                      \
        portYIELD_WITHIN_API();                                  \
    } while( 0 )

/* Request a yield only if the woken task pxTCB has a strictly higher priority
 * than the currently running task; otherwise just mark the branch for test
 * coverage purposes. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                        \
        if( pxCurrentTCB->uxPriority < ( pxTCB )->uxPriority )  \
        {                                                       \
            portYIELD_WITHIN_API();                             \
        }                                                       \
        else                                                    \
        {                                                       \
            mtCOVERAGE_TEST_MARKER();                           \
        }                                                       \
    } while( 0 )

    #else /* if ( configNUMBER_OF_CORES == 1 ) */

/* Yield the core on which this task is running. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )    prvYieldCore( ( pxTCB )->xTaskRunState )

/* Yield for the task if a running task has priority lower than this task. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )     prvYieldForTask( pxTCB )

    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* if ( configUSE_PREEMPTION == 0 ) */
103
/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE                        ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif

/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to
 * be global, rather than file scope.  Defining portREMOVE_STATIC_QUALIFIER
 * makes every 'static' in this file expand to nothing.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME    "IDLE"
#endif
151
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task.  It is only ever raised here; it is lowered lazily by
 * taskSELECT_HIGHEST_PRIORITY_TASK() when the higher lists are found empty. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    do {                                            \
        if( ( uxPriority ) > uxTopReadyPriority )   \
        {                                           \
            uxTopReadyPriority = ( uxPriority );    \
        }                                           \
    } while( 0 ) /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

    #if ( configNUMBER_OF_CORES == 1 )
        #define taskSELECT_HIGHEST_PRIORITY_TASK()                            \
    do {                                                                      \
        UBaseType_t uxTopPriority = uxTopReadyPriority;                       \
                                                                              \
        /* Find the highest priority queue that contains ready tasks. */      \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
        {                                                                     \
            configASSERT( uxTopPriority );                                    \
            --uxTopPriority;                                                  \
        }                                                                     \
                                                                              \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
         * the same priority get an equal share of the processor time. */                     \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
        uxTopReadyPriority = uxTopPriority;                                                   \
    } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

        #define taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID )    prvSelectHighestPriorityTask( xCoreID )

    #endif /* if ( configNUMBER_OF_CORES == 1 ) */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used. */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                  \
    do {                                                                                        \
        UBaseType_t uxTopPriority;                                                              \
                                                                                                \
        /* Find the highest priority list that contains ready tasks. */                         \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                          \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );   \
    } while( 0 )

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                     \
    do {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                              \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                        \
        }                                                                                              \
    } while( 0 )

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
236
237 /*-----------------------------------------------------------*/
238
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows. */
#define taskSWITCH_DELAYED_LISTS()                                              \
    do {                                                                        \
        List_t * pxSwap;                                                        \
                                                                                \
        /* Every task on the current delayed list must already have been        \
         * unblocked before the lists can be exchanged - assert that here. */   \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );             \
                                                                                \
        /* Exchange the two list pointers, note the overflow, and recompute     \
         * the time at which the next task will leave the Blocked state. */     \
        pxSwap = pxDelayedTaskList;                                             \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                          \
        pxOverflowDelayedTaskList = pxSwap;                                     \
        xNumOfOverflows++;                                                      \
        prvResetNextTaskUnblockTime();                                          \
    } while( 0 )
254
255 /*-----------------------------------------------------------*/
256
/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.  The task's priority is
 * also recorded via taskRECORD_READY_PRIORITY() so the scheduler knows the
 * highest priority that currently contains a ready task.
 */
#define prvAddTaskToReadyList( pxTCB )                                                                     \
    do {                                                                                                   \
        traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                           \
        taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                                \
        listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
        tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB );                                                      \
    } while( 0 )
/*-----------------------------------------------------------*/

/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
277
/* The item value of the event list item is normally used to hold the priority
 * of the task to which it belongs (coded to allow it to be held in reverse
 * priority order).  However, it is occasionally borrowed for other purposes.  It
 * is important its value is not updated due to a task priority change while it is
 * being used for another purpose.  The following bit definition is used to inform
 * the scheduler that the value should not be changed - in which case it is the
 * responsibility of whichever module is using the value to ensure it gets set back
 * to its original value when it is released.  The marker is always the top bit
 * of the configured tick type width. */
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint16_t ) 0x8000U )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint32_t ) 0x80000000UL )
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    ( ( uint64_t ) 0x8000000000000000ULL )
#endif

/* Indicates that the task is not actively running on any core. */
#define taskTASK_NOT_RUNNING           ( ( BaseType_t ) ( -1 ) )

/* Indicates that the task is actively running but scheduled to yield. */
#define taskTASK_SCHEDULED_TO_YIELD    ( ( BaseType_t ) ( -2 ) )

/* Returns pdTRUE if the task is actively running and not scheduled to yield.
 * In the SMP build a task is running when xTaskRunState holds a valid core ID,
 * i.e. a value in the range [ 0, configNUMBER_OF_CORES ). */
#if ( configNUMBER_OF_CORES == 1 )
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
#else
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB )->xTaskRunState != taskTASK_NOT_RUNNING ) ? ( pdTRUE ) : ( pdFALSE ) )
#endif

/* Indicates that the task is an Idle task. */
#define taskATTRIBUTE_IS_IDLE    ( UBaseType_t ) ( 1UL << 0UL )

/* When the port does not maintain its own critical nesting count, keep it in
 * the TCB of the task running on the calling core. */
#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) )
    #define portGET_CRITICAL_NESTING_COUNT()          ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting )
    #define portSET_CRITICAL_NESTING_COUNT( x )       ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) )
    #define portINCREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ )
    #define portDECREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- )
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */

#define taskBITS_PER_BYTE    ( ( size_t ) 8 )
320
#if ( configNUMBER_OF_CORES > 1 )

/* Yields the given core. This must be called from a critical section and xCoreID
 * must be valid. This macro is not required in single core since there is only
 * one core to yield. */
    #define prvYieldCore( xCoreID )                                                          \
    do {                                                                                     \
        if( ( xCoreID ) == ( BaseType_t ) portGET_CORE_ID() )                                \
        {                                                                                    \
            /* Pending a yield for this core since it is in the critical section. */         \
            xYieldPendings[ ( xCoreID ) ] = pdTRUE;                                          \
        }                                                                                    \
        else                                                                                 \
        {                                                                                    \
            /* Request other core to yield if it is not requested before.  Setting   \
             * xTaskRunState to taskTASK_SCHEDULED_TO_YIELD prevents a duplicate     \
             * portYIELD_CORE() request for the same core. */                        \
            if( pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD ) \
            {                                                                                \
                portYIELD_CORE( xCoreID );                                                   \
                pxCurrentTCBs[ ( xCoreID ) ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD;   \
            }                                                                                \
        }                                                                                    \
    } while( 0 )
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
344 /*-----------------------------------------------------------*/
345
/*
 * Task control block.  A task control block (TCB) is allocated for each task,
 * and stores task state information, including a pointer to the task's context
 * (the task's run time environment, including register values)
 */
typedef struct tskTaskControlBlock       /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
    #endif

    #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
        UBaseType_t uxCoreAffinityMask; /**< Used to link the task to certain cores.  UBaseType_t must have greater than or equal to the number of bits as configNUMBER_OF_CORES. */
    #endif

    ListItem_t xStateListItem;                  /**< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended ). */
    ListItem_t xEventListItem;                  /**< Used to reference a task from an event list. */
    UBaseType_t uxPriority;                     /**< The priority of the task.  0 is the lowest priority. */
    StackType_t * pxStack;                      /**< Points to the start of the stack. */
    #if ( configNUMBER_OF_CORES > 1 )
        volatile BaseType_t xTaskRunState;      /**< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */
        UBaseType_t uxTaskAttributes;           /**< Task's attributes - currently used to identify the idle tasks. */
    #endif
    char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created.  Facilitates debugging only. */

    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
        BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
    #endif

    #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
        StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
    #endif

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxTCBNumber;  /**< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
        UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
    #endif

    #if ( configUSE_MUTEXES == 1 )
        UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
        UBaseType_t uxMutexesHeld;  /**< Count of mutexes currently held by the task. */
    #endif

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        TaskHookFunction_t pxTaskTag; /**< Application supplied hook/tag associated with the task. */
    #endif

    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
        void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /**< Per-task thread local storage pointer array. */
    #endif

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
    #endif

    #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
        volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; /**< Notification value for each notification index. */
        volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];    /**< One of the taskNOT_WAITING_NOTIFICATION / taskWAITING_NOTIFICATION / taskNOTIFICATION_RECEIVED values. */
    #endif

    /* See the comments in FreeRTOS.h with the definition of
     * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
    #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        uint8_t ucDelayAborted; /**< Set to pdTRUE when xTaskAbortDelay() cuts a Blocked period short. */
    #endif

    #if ( configUSE_POSIX_ERRNO == 1 )
        int iTaskErrno; /**< Per-task errno value, swapped into FreeRTOS_errno on context switch. */
    #endif
} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
 * below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;
434
#if ( configNUMBER_OF_CORES == 1 )
    /* MISRA Ref 8.4.1 [Declaration shall be visible] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
    /* coverity[misra_c_2012_rule_8_4_violation] */
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
#else
    /* MISRA Ref 8.4.1 [Declaration shall be visible] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
    /* coverity[misra_c_2012_rule_8_4_violation] */
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ];
    /* In SMP builds pxCurrentTCB resolves, via xTaskGetCurrentTaskHandle(), to
     * the TCB of the task running on the calling core. */
    #define pxCurrentTCB    xTaskGetCurrentTaskHandle()
#endif
447
/* Lists for ready and blocked tasks. --------------------
 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
 * the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /**< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /**< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList;                         /**< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if ( INCLUDE_vTaskDelete == 1 )

    PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;

#endif

#if ( INCLUDE_vTaskSuspend == 1 )

    PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */

#endif

/* Global POSIX errno. Its value is changed upon context switching to match
 * the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE };
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ];       /**< Holds the handles of the idle tasks.  The idle tasks are created automatically when the scheduler is started. */

/* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists.
 * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
 * to determine the number of priority lists to read back from the remote target.
 * NOTE(review): declared static here - ports that need the symbol visible to an
 * external debugger appear to rely on portREMOVE_STATIC_QUALIFIER (defined
 * earlier in this file) to expose it; confirm against the port in use. */
static const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;

/* Context switches are held pending while the scheduler is suspended.  Also,
 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
 * If an interrupt needs to unblock a task while the scheduler is suspended then it
 * moves the task's event list item into the xPendingReadyList, ready for the
 * kernel to move the task from the pending ready list into the real ready list
 * when the scheduler is unsuspended.  The pending ready list itself can only be
 * accessed from a critical section.
 *
 * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock
 * and must not be done from an ISR. Reads must be protected by either lock and may be done
 * from either an ISR or a task. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;

#if ( configGENERATE_RUN_TIME_STATS == 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U };    /**< Holds the value of a timer/counter the last time a task was switched in. */
PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */

#endif
517
518 /*-----------------------------------------------------------*/
519
520 /* File private functions. --------------------------------*/
521
522 /*
523  * Creates the idle tasks during scheduler start.
524  */
525 static BaseType_t prvCreateIdleTasks( void );
526
527 #if ( configNUMBER_OF_CORES > 1 )
528
529 /*
530  * Checks to see if another task moved the current task out of the ready
531  * list while it was waiting to enter a critical section and yields, if so.
532  */
533     static void prvCheckForRunStateChange( void );
534 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
535
536 #if ( configNUMBER_OF_CORES > 1 )
537
538 /*
539  * Yields a core, or cores if multiple priorities are not allowed to run
540  * simultaneously, to allow the task pxTCB to run.
541  */
542     static void prvYieldForTask( const TCB_t * pxTCB );
543 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
544
545 #if ( configNUMBER_OF_CORES > 1 )
546
547 /*
548  * Selects the highest priority available task for the given core.
549  */
550     static void prvSelectHighestPriorityTask( BaseType_t xCoreID );
551 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
552
553 /**
554  * Utility task that simply returns pdTRUE if the task referenced by xTask is
555  * currently in the Suspended state, or pdFALSE if the task referenced by xTask
556  * is in any other state.
557  */
558 #if ( INCLUDE_vTaskSuspend == 1 )
559
560     static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
561
562 #endif /* INCLUDE_vTaskSuspend */
563
564 /*
565  * Utility to ready all the lists used by the scheduler.  This is called
566  * automatically upon the creation of the first task.
567  */
568 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
569
570 /*
 * The idle task, which, like all tasks, is implemented as a never ending loop.
572  * The idle task is automatically created and added to the ready lists upon
573  * creation of the first user task.
574  *
575  * In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks are also
576  * created to ensure that each core has an idle task to run when no other
577  * task is available to run.
578  *
579  * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
580  * language extensions.  The equivalent prototype for these functions are:
581  *
582  * void prvIdleTask( void *pvParameters );
583  * void prvPassiveIdleTask( void *pvParameters );
584  *
585  */
586 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
587 #if ( configNUMBER_OF_CORES > 1 )
588     static portTASK_FUNCTION_PROTO( prvPassiveIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
589 #endif
590
591 /*
592  * Utility to free all memory allocated by the scheduler to hold a TCB,
593  * including the stack pointed to by the TCB.
594  *
595  * This does not free memory allocated by the task itself (i.e. memory
596  * allocated by calls to pvPortMalloc from within the tasks application code).
597  */
598 #if ( INCLUDE_vTaskDelete == 1 )
599
600     static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
601
602 #endif
603
604 /*
605  * Used only by the idle task.  This checks to see if anything has been placed
606  * in the list of tasks waiting to be deleted.  If so the task is cleaned up
607  * and its TCB deleted.
608  */
609 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
610
611 /*
612  * The currently executing task is entering the Blocked state.  Add the task to
613  * either the current or the overflow delayed task list.
614  */
615 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
616                                             const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
617
618 /*
 * Fills a TaskStatus_t structure with information on each task that is
620  * referenced from the pxList list (which may be a ready list, a delayed list,
621  * a suspended list, etc.).
622  *
623  * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
624  * NORMAL APPLICATION CODE.
625  */
626 #if ( configUSE_TRACE_FACILITY == 1 )
627
628     static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
629                                                      List_t * pxList,
630                                                      eTaskState eState ) PRIVILEGED_FUNCTION;
631
632 #endif
633
634 /*
635  * Searches pxList for a task with name pcNameToQuery - returning a handle to
636  * the task if it is found, or NULL if the task is not found.
637  */
638 #if ( INCLUDE_xTaskGetHandle == 1 )
639
640     static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
641                                                      const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
642
643 #endif
644
645 /*
646  * When a task is created, the stack of the task is filled with a known value.
647  * This function determines the 'high water mark' of the task stack by
648  * determining how much of the stack remains at the original preset value.
649  */
650 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
651
652     static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
653
654 #endif
655
656 /*
657  * Return the amount of time, in ticks, that will pass before the kernel will
658  * next move a task from the Blocked state to the Running state.
659  *
660  * This conditional compilation should use inequality to 0, not equality to 1.
661  * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
662  * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
663  * set to a value other than 1.
664  */
665 #if ( configUSE_TICKLESS_IDLE != 0 )
666
667     static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
668
669 #endif
670
671 /*
672  * Set xNextTaskUnblockTime to the time at which the next Blocked state task
673  * will exit the Blocked state.
674  */
675 static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
676
677 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
678
679 /*
680  * Helper function used to pad task names with spaces when printing out
681  * human readable tables of task information.
682  */
683     static char * prvWriteNameToBuffer( char * pcBuffer,
684                                         const char * pcTaskName ) PRIVILEGED_FUNCTION;
685
686 #endif
687
688 /*
689  * Called after a Task_t structure has been allocated either statically or
690  * dynamically to fill in the structure's members.
691  */
692 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
693                                   const char * const pcName,
694                                   const uint32_t ulStackDepth,
695                                   void * const pvParameters,
696                                   UBaseType_t uxPriority,
697                                   TaskHandle_t * const pxCreatedTask,
698                                   TCB_t * pxNewTCB,
699                                   const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
700
701 /*
702  * Called after a new task has been created and initialised to place the task
703  * under the control of the scheduler.
704  */
705 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
706
707 /*
708  * Create a task with static buffer for both TCB and stack. Returns a handle to
709  * the task if it is created successfully. Otherwise, returns NULL.
710  */
711 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
712     static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
713                                         const char * const pcName,
714                                         const uint32_t ulStackDepth,
715                                         void * const pvParameters,
716                                         UBaseType_t uxPriority,
717                                         StackType_t * const puxStackBuffer,
718                                         StaticTask_t * const pxTaskBuffer,
719                                         TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
720 #endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
721
722 /*
723  * Create a restricted task with static buffer for both TCB and stack. Returns
724  * a handle to the task if it is created successfully. Otherwise, returns NULL.
725  */
726 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
727     static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
728                                                   TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
729 #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
730
731 /*
732  * Create a restricted task with static buffer for task stack and allocated buffer
733  * for TCB. Returns a handle to the task if it is created successfully. Otherwise,
734  * returns NULL.
735  */
736 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
737     static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
738                                             TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
739 #endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
740
741 /*
742  * Create a task with allocated buffer for both TCB and stack. Returns a handle to
743  * the task if it is created successfully. Otherwise, returns NULL.
744  */
745 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
746     static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
747                                   const char * const pcName,
748                                   const configSTACK_DEPTH_TYPE usStackDepth,
749                                   void * const pvParameters,
750                                   UBaseType_t uxPriority,
751                                   TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
752 #endif /* #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */
753
754 /*
755  * freertos_tasks_c_additions_init() should only be called if the user definable
756  * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
757  * called by the function.
758  */
759 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
760
761     static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
762
763 #endif
764
765 #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
766     extern void vApplicationPassiveIdleHook( void );
767 #endif /* #if ( configUSE_PASSIVE_IDLE_HOOK == 1 ) */
768
769 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
770
771 /*
772  * Convert the snprintf return value to the number of characters
773  * written. The following are the possible cases:
774  *
775  * 1. The buffer supplied to snprintf is large enough to hold the
776  *    generated string. The return value in this case is the number
777  *    of characters actually written, not counting the terminating
778  *    null character.
779  * 2. The buffer supplied to snprintf is NOT large enough to hold
780  *    the generated string. The return value in this case is the
781  *    number of characters that would have been written if the
782  *    buffer had been sufficiently large, not counting the
783  *    terminating null character.
784  * 3. Encoding error. The return value in this case is a negative
785  *    number.
786  *
787  * From 1 and 2 above ==> Only when the return value is non-negative
788  * and less than the supplied buffer length, the string has been
789  * completely written.
790  */
791     static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
792                                                         size_t n );
793
794 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
795 /*-----------------------------------------------------------*/
796
#if ( configNUMBER_OF_CORES > 1 )

/* Called on entry to a critical section or scheduler suspension. If another
 * core has asked this core's task to yield while it was waiting for the
 * locks, drop the locks, let the pending yield interrupt run, then reacquire
 * everything and re-check. */
    static void prvCheckForRunStateChange( void )
    {
        UBaseType_t uxPrevCriticalNesting;
        const TCB_t * pxThisTCB;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        /* This function is always called with interrupts disabled
         * so this is safe. */
        pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];

        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
        {
            /* We are only here if we just entered a critical section
            * or if we just suspended the scheduler, and another task
            * has requested that we yield.
            *
            * This is slightly complicated since we need to save and restore
            * the suspension and critical nesting counts, as well as release
            * and reacquire the correct locks. And then, do it all over again
            * if our state changed again during the reacquisition. */
            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT();

            if( uxPrevCriticalNesting > 0U )
            {
                /* We were in a critical section: clear the nesting count and
                 * release the ISR lock that the critical section held. */
                portSET_CRITICAL_NESTING_COUNT( 0U );
                portRELEASE_ISR_LOCK();
            }
            else
            {
                /* The scheduler is suspended. uxSchedulerSuspended is updated
                 * only when the task is not requested to yield. */
                mtCOVERAGE_TEST_MARKER();
            }

            portRELEASE_TASK_LOCK();
            portMEMORY_BARRIER();
            configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD );

            portENABLE_INTERRUPTS();

            /* Enabling interrupts should cause this core to immediately
             * service the pending interrupt and yield. If the run state is still
             * yielding here then that is a problem. */
            configASSERT( pxThisTCB->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD );

            /* Reacquire both locks with interrupts disabled before restoring
             * the saved critical nesting count. */
            portDISABLE_INTERRUPTS();
            portGET_TASK_LOCK();
            portGET_ISR_LOCK();

            portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting );

            if( uxPrevCriticalNesting == 0U )
            {
                /* We were not in a critical section, so the ISR lock must not
                 * remain held across the loop re-check. */
                portRELEASE_ISR_LOCK();
            }
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
858
859 /*-----------------------------------------------------------*/
860
#if ( configNUMBER_OF_CORES > 1 )

/* Request yields so that the task pxTCB can run: find the running core with
 * the lowest effective priority (system idle tasks count as priority -1) that
 * pxTCB is allowed to run on, and yield it. When multiple priorities may not
 * run simultaneously, also yield every core running a non-idle task of lower
 * priority than pxTCB. Must be called from a critical section. */
    static void prvYieldForTask( const TCB_t * pxTCB )
    {
        BaseType_t xLowestPriorityToPreempt;
        BaseType_t xCurrentCoreTaskPriority;
        BaseType_t xLowestPriorityCore = ( BaseType_t ) -1;
        BaseType_t xCoreID;

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xYieldCount = 0;
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        /* This must be called from a critical section. */
        configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )

            /* No task should yield for this one if it is a lower priority
             * than priority level of currently ready tasks. */
            if( pxTCB->uxPriority >= uxTopReadyPriority )
        #else
            /* Yield is not required for a task which is already running. */
            if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
        #endif
        {
            xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority;

            /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB
             * is 0. This is ok as we will give system idle tasks a priority of -1 below. */
            --xLowestPriorityToPreempt;

            /* Scan every core looking for the running task with the lowest
             * effective priority that pxTCB is allowed to preempt. */
            for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
            {
                xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority;

                /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */
                if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                {
                    xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1;
                }

                /* Only consider cores that are actually running their task and
                 * do not already have a yield pending. */
                if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) )
                {
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
                    #endif
                    {
                        if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt )
                        {
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                            #endif
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    /* This core is the best preemption candidate so far. */
                                    xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                                    xLowestPriorityCore = xCoreID;
                                }
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* Yield all currently running non-idle tasks with a priority lower than
                         * the task that needs to run. */
                        if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) &&
                            ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) )
                        {
                            prvYieldCore( xCoreID );
                            xYieldCount++;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Yield the chosen core, if one was found (and, when only one
             * priority level may run, if no cores were yielded above). */
            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) )
            #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                if( xLowestPriorityCore >= 0 )
            #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
            {
                prvYieldCore( xLowestPriorityCore );
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                /* Verify that the calling core always yields to higher priority tasks. */
                if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U ) &&
                    ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) )
                {
                    configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) ||
                                  ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) );
                }
            #endif
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
973 /*-----------------------------------------------------------*/
974
#if ( configNUMBER_OF_CORES > 1 )

/* Select the highest priority ready task that may run on core xCoreID and
 * install it in pxCurrentTCBs[ xCoreID ]. Walks the ready lists from
 * uxTopReadyPriority down to tskIDLE_PRIORITY, honoring core affinity and
 * (optionally) the single-priority-level restriction. Must be called with
 * the scheduler running and the appropriate locks held. */
    static void prvSelectHighestPriorityTask( BaseType_t xCoreID )
    {
        UBaseType_t uxCurrentPriority = uxTopReadyPriority;
        BaseType_t xTaskScheduled = pdFALSE;
        BaseType_t xDecrementTopPriority = pdTRUE;

        #if ( configUSE_CORE_AFFINITY == 1 )
            const TCB_t * pxPreviousTCB = NULL;
        #endif
        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xPriorityDropped = pdFALSE;
        #endif

        /* This function should be called when scheduler is running. */
        configASSERT( xSchedulerRunning == pdTRUE );

        /* A new task is created and a running task with the same priority yields
         * itself to run the new task. When a running task yields itself, it is still
         * in the ready list. This running task will be selected before the new task
         * since the new task is always added to the end of the ready list.
         * The other problem is that the running task is still in the same position of
         * the ready list when it yields itself. It is possible that it will be selected
         * earlier than other tasks which have waited longer than this task.
         *
         * To fix these problems, the running task should be put to the end of the
         * ready list before searching for the ready task in the ready list. */
        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                                     &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE )
        {
            ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem );
            vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                            &pxCurrentTCBs[ xCoreID ]->xStateListItem );
        }

        /* Walk the ready lists from highest to lowest priority until a task
         * is scheduled on this core. */
        while( xTaskScheduled == pdFALSE )
        {
            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            {
                if( uxCurrentPriority < uxTopReadyPriority )
                {
                    /* We can't schedule any tasks, other than idle, that have a
                     * priority lower than the priority of a task currently running
                     * on another core. */
                    uxCurrentPriority = tskIDLE_PRIORITY;
                }
            }
            #endif

            if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
            {
                const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
                const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList );
                ListItem_t * pxIterator;

                /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority
                 * must not be decremented any further. */
                xDecrementTopPriority = pdFALSE;

                for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
                {
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator );

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* When falling back to the idle priority because only one priority
                         * level is allowed to run at a time, we should ONLY schedule the true
                         * idle tasks, not user tasks at the idle priority. */
                        if( uxCurrentPriority < uxTopReadyPriority )
                        {
                            if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U )
                            {
                                continue;
                            }
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

                    if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                    {
                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* If the task is not being executed by any core swap it in. */
                            pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                /* Remember the evicted task so an alternative core
                                 * can be searched for it below. */
                                pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
                            #endif
                            pxTCB->xTaskRunState = xCoreID;
                            pxCurrentTCBs[ xCoreID ] = pxTCB;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else if( pxTCB == pxCurrentTCBs[ xCoreID ] )
                    {
                        configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) );

                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* The task is already running on this core, mark it as scheduled. */
                            pxTCB->xTaskRunState = xCoreID;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else
                    {
                        /* This task is running on the core other than xCoreID. */
                        mtCOVERAGE_TEST_MARKER();
                    }

                    if( xTaskScheduled != pdFALSE )
                    {
                        /* A task has been selected to run on this core. */
                        break;
                    }
                }
            }
            else
            {
                if( xDecrementTopPriority != pdFALSE )
                {
                    /* No ready task found at this level and none above it either,
                     * so the top ready priority can be lowered. */
                    uxTopReadyPriority--;
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        xPriorityDropped = pdTRUE;
                    }
                    #endif
                }
            }

            /* There are configNUMBER_OF_CORES Idle tasks created when scheduler started.
             * The scheduler should be able to select a task to run when uxCurrentPriority
             * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased to a value below
             * tskIDLE_PRIORITY. */
            if( uxCurrentPriority > tskIDLE_PRIORITY )
            {
                uxCurrentPriority--;
            }
            else
            {
                /* This function is called when idle task is not created. Break the
                 * loop to prevent uxCurrentPriority overrun. */
                break;
            }
        }

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( xPriorityDropped != pdFALSE )
                {
                    /* There may be several ready tasks that were being prevented from running because there was
                     * a higher priority task running. Now that the last of the higher priority tasks is no longer
                     * running, make sure all the other idle tasks yield. */
                    BaseType_t x;

                    for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ )
                    {
                        if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                        {
                            prvYieldCore( x );
                        }
                    }
                }
            }
        }
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        #if ( configUSE_CORE_AFFINITY == 1 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) )
                {
                    /* A ready task was just evicted from this core. See if it can be
                     * scheduled on any other core. */
                    UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask;
                    BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority;
                    BaseType_t xLowestPriorityCore = -1;
                    BaseType_t x;

                    if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                    {
                        /* Idle tasks are given an effective priority of -1, as above. */
                        xLowestPriority = xLowestPriority - 1;
                    }

                    if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                    {
                        /* pxPreviousTCB was removed from this core and this core is not excluded
                         * from its core affinity mask.
                         *
                         * pxPreviousTCB is preempted by the new higher priority task
                         * pxCurrentTCBs[ xCoreID ]. When searching a new core for pxPreviousTCB,
                         * we do not need to look at the cores on which pxCurrentTCBs[ xCoreID ]
                         * is allowed to run. The reason is - when more than one cores are
                         * eligible for an incoming task, we preempt the core with the minimum
                         * priority task. Because this core (i.e. xCoreID) was preempted for
                         * pxCurrentTCBs[ xCoreID ], this means that all the others cores
                         * where pxCurrentTCBs[ xCoreID ] can run, are running tasks with priority
                         * no lower than pxPreviousTCB's priority. Therefore, the only cores
                         * which can be preempted for pxPreviousTCB are the ones where
                         * pxCurrentTCBs[ xCoreID ] is not allowed to run (and obviously,
                         * pxPreviousTCB is allowed to run).
                         *
                         * This is an optimization which reduces the number of cores needed to be
                         * searched for pxPreviousTCB to run. */
                        uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask );
                    }
                    else
                    {
                        /* pxPreviousTCB's core affinity mask is changed and it is no longer
                         * allowed to run on this core. Searching all the cores in pxPreviousTCB's
                         * new core affinity mask to find a core on which it can run. */
                    }

                    /* Mask off bits beyond the number of cores present. */
                    uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                    for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- )
                    {
                        UBaseType_t uxCore = ( UBaseType_t ) x;
                        BaseType_t xTaskPriority;

                        if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U )
                        {
                            xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority;

                            if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                            {
                                xTaskPriority = xTaskPriority - ( BaseType_t ) 1;
                            }

                            uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore );

                            /* Track the running, not-yet-yielding core with the
                             * lowest effective priority below pxPreviousTCB's. */
                            if( ( xTaskPriority < xLowestPriority ) &&
                                ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) &&
                                ( xYieldPendings[ uxCore ] == pdFALSE ) )
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    xLowestPriority = xTaskPriority;
                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
                                }
                            }
                        }
                    }

                    if( xLowestPriorityCore >= 0 )
                    {
                        prvYieldCore( xLowestPriorityCore );
                    }
                }
            }
        }
        #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */
    }

#endif /* ( configNUMBER_OF_CORES > 1 ) */
1241
1242 /*-----------------------------------------------------------*/
1243
1244 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
1245
1246     static TCB_t * prvCreateStaticTask( TaskFunction_t pxTaskCode,
1247                                         const char * const pcName,
1248                                         const uint32_t ulStackDepth,
1249                                         void * const pvParameters,
1250                                         UBaseType_t uxPriority,
1251                                         StackType_t * const puxStackBuffer,
1252                                         StaticTask_t * const pxTaskBuffer,
1253                                         TaskHandle_t * const pxCreatedTask )
1254     {
1255         TCB_t * pxNewTCB;
1256
1257         configASSERT( puxStackBuffer != NULL );
1258         configASSERT( pxTaskBuffer != NULL );
1259
1260         #if ( configASSERT_DEFINED == 1 )
1261         {
1262             /* Sanity check that the size of the structure used to declare a
1263              * variable of type StaticTask_t equals the size of the real task
1264              * structure. */
1265             volatile size_t xSize = sizeof( StaticTask_t );
1266             configASSERT( xSize == sizeof( TCB_t ) );
1267             ( void ) xSize; /* Prevent unused variable warning when configASSERT() is not used. */
1268         }
1269         #endif /* configASSERT_DEFINED */
1270
1271         if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
1272         {
1273             /* The memory used for the task's TCB and stack are passed into this
1274              * function - use them. */
1275             /* MISRA Ref 11.3.1 [Misaligned access] */
1276             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
1277             /* coverity[misra_c_2012_rule_11_3_violation] */
1278             pxNewTCB = ( TCB_t * ) pxTaskBuffer;
1279             ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1280             pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
1281
1282             #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
1283             {
1284                 /* Tasks can be created statically or dynamically, so note this
1285                  * task was created statically in case the task is later deleted. */
1286                 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
1287             }
1288             #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1289
1290             prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
1291         }
1292         else
1293         {
1294             pxNewTCB = NULL;
1295         }
1296
1297         return pxNewTCB;
1298     }
1299 /*-----------------------------------------------------------*/
1300
1301     TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
1302                                     const char * const pcName,
1303                                     const uint32_t ulStackDepth,
1304                                     void * const pvParameters,
1305                                     UBaseType_t uxPriority,
1306                                     StackType_t * const puxStackBuffer,
1307                                     StaticTask_t * const pxTaskBuffer )
1308     {
1309         TaskHandle_t xReturn = NULL;
1310         TCB_t * pxNewTCB;
1311
1312         traceENTER_xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );
1313
1314         pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
1315
1316         if( pxNewTCB != NULL )
1317         {
1318             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1319             {
1320                 /* Set the task's affinity before scheduling it. */
1321                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1322             }
1323             #endif
1324
1325             prvAddNewTaskToReadyList( pxNewTCB );
1326         }
1327         else
1328         {
1329             mtCOVERAGE_TEST_MARKER();
1330         }
1331
1332         traceRETURN_xTaskCreateStatic( xReturn );
1333
1334         return xReturn;
1335     }
1336 /*-----------------------------------------------------------*/
1337
1338     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1339         TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode,
1340                                                    const char * const pcName,
1341                                                    const uint32_t ulStackDepth,
1342                                                    void * const pvParameters,
1343                                                    UBaseType_t uxPriority,
1344                                                    StackType_t * const puxStackBuffer,
1345                                                    StaticTask_t * const pxTaskBuffer,
1346                                                    UBaseType_t uxCoreAffinityMask )
1347         {
1348             TaskHandle_t xReturn = NULL;
1349             TCB_t * pxNewTCB;
1350
1351             traceENTER_xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, uxCoreAffinityMask );
1352
1353             pxNewTCB = prvCreateStaticTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, &xReturn );
1354
1355             if( pxNewTCB != NULL )
1356             {
1357                 /* Set the task's affinity before scheduling it. */
1358                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1359
1360                 prvAddNewTaskToReadyList( pxNewTCB );
1361             }
1362             else
1363             {
1364                 mtCOVERAGE_TEST_MARKER();
1365             }
1366
1367             traceRETURN_xTaskCreateStaticAffinitySet( xReturn );
1368
1369             return xReturn;
1370         }
1371     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1372
1373 #endif /* SUPPORT_STATIC_ALLOCATION */
1374 /*-----------------------------------------------------------*/
1375
1376 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
1377     static TCB_t * prvCreateRestrictedStaticTask( const TaskParameters_t * const pxTaskDefinition,
1378                                                   TaskHandle_t * const pxCreatedTask )
1379     {
1380         TCB_t * pxNewTCB;
1381
1382         configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
1383         configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
1384
1385         if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
1386         {
1387             /* Allocate space for the TCB.  Where the memory comes from depends
1388              * on the implementation of the port malloc function and whether or
1389              * not static allocation is being used. */
1390             pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
1391             ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1392
1393             /* Store the stack location in the TCB. */
1394             pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
1395
1396             #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
1397             {
1398                 /* Tasks can be created statically or dynamically, so note this
1399                  * task was created statically in case the task is later deleted. */
1400                 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
1401             }
1402             #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1403
1404             prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
1405                                   pxTaskDefinition->pcName,
1406                                   ( uint32_t ) pxTaskDefinition->usStackDepth,
1407                                   pxTaskDefinition->pvParameters,
1408                                   pxTaskDefinition->uxPriority,
1409                                   pxCreatedTask, pxNewTCB,
1410                                   pxTaskDefinition->xRegions );
1411         }
1412         else
1413         {
1414             pxNewTCB = NULL;
1415         }
1416
1417         return pxNewTCB;
1418     }
1419 /*-----------------------------------------------------------*/
1420
1421     BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
1422                                             TaskHandle_t * pxCreatedTask )
1423     {
1424         TCB_t * pxNewTCB;
1425         BaseType_t xReturn;
1426
1427         traceENTER_xTaskCreateRestrictedStatic( pxTaskDefinition, pxCreatedTask );
1428
1429         configASSERT( pxTaskDefinition != NULL );
1430
1431         pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
1432
1433         if( pxNewTCB != NULL )
1434         {
1435             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1436             {
1437                 /* Set the task's affinity before scheduling it. */
1438                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1439             }
1440             #endif
1441
1442             prvAddNewTaskToReadyList( pxNewTCB );
1443             xReturn = pdPASS;
1444         }
1445         else
1446         {
1447             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1448         }
1449
1450         traceRETURN_xTaskCreateRestrictedStatic( xReturn );
1451
1452         return xReturn;
1453     }
1454 /*-----------------------------------------------------------*/
1455
1456     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1457         BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition,
1458                                                            UBaseType_t uxCoreAffinityMask,
1459                                                            TaskHandle_t * pxCreatedTask )
1460         {
1461             TCB_t * pxNewTCB;
1462             BaseType_t xReturn;
1463
1464             traceENTER_xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
1465
1466             configASSERT( pxTaskDefinition != NULL );
1467
1468             pxNewTCB = prvCreateRestrictedStaticTask( pxTaskDefinition, pxCreatedTask );
1469
1470             if( pxNewTCB != NULL )
1471             {
1472                 /* Set the task's affinity before scheduling it. */
1473                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1474
1475                 prvAddNewTaskToReadyList( pxNewTCB );
1476                 xReturn = pdPASS;
1477             }
1478             else
1479             {
1480                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1481             }
1482
1483             traceRETURN_xTaskCreateRestrictedStaticAffinitySet( xReturn );
1484
1485             return xReturn;
1486         }
1487     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1488
1489 #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
1490 /*-----------------------------------------------------------*/
1491
1492 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
1493     static TCB_t * prvCreateRestrictedTask( const TaskParameters_t * const pxTaskDefinition,
1494                                             TaskHandle_t * const pxCreatedTask )
1495     {
1496         TCB_t * pxNewTCB;
1497
1498         configASSERT( pxTaskDefinition->puxStackBuffer );
1499
1500         if( pxTaskDefinition->puxStackBuffer != NULL )
1501         {
1502             /* MISRA Ref 11.5.1 [Malloc memory assignment] */
1503             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
1504             /* coverity[misra_c_2012_rule_11_5_violation] */
1505             pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
1506
1507             if( pxNewTCB != NULL )
1508             {
1509                 ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1510
1511                 /* Store the stack location in the TCB. */
1512                 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
1513
1514                 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
1515                 {
1516                     /* Tasks can be created statically or dynamically, so note
1517                      * this task had a statically allocated stack in case it is
1518                      * later deleted.  The TCB was allocated dynamically. */
1519                     pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
1520                 }
1521                 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1522
1523                 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
1524                                       pxTaskDefinition->pcName,
1525                                       ( uint32_t ) pxTaskDefinition->usStackDepth,
1526                                       pxTaskDefinition->pvParameters,
1527                                       pxTaskDefinition->uxPriority,
1528                                       pxCreatedTask, pxNewTCB,
1529                                       pxTaskDefinition->xRegions );
1530             }
1531         }
1532         else
1533         {
1534             pxNewTCB = NULL;
1535         }
1536
1537         return pxNewTCB;
1538     }
1539 /*-----------------------------------------------------------*/
1540
1541     BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
1542                                       TaskHandle_t * pxCreatedTask )
1543     {
1544         TCB_t * pxNewTCB;
1545         BaseType_t xReturn;
1546
1547         traceENTER_xTaskCreateRestricted( pxTaskDefinition, pxCreatedTask );
1548
1549         pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
1550
1551         if( pxNewTCB != NULL )
1552         {
1553             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1554             {
1555                 /* Set the task's affinity before scheduling it. */
1556                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1557             }
1558             #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1559
1560             prvAddNewTaskToReadyList( pxNewTCB );
1561
1562             xReturn = pdPASS;
1563         }
1564         else
1565         {
1566             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1567         }
1568
1569         traceRETURN_xTaskCreateRestricted( xReturn );
1570
1571         return xReturn;
1572     }
1573 /*-----------------------------------------------------------*/
1574
1575     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1576         BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition,
1577                                                      UBaseType_t uxCoreAffinityMask,
1578                                                      TaskHandle_t * pxCreatedTask )
1579         {
1580             TCB_t * pxNewTCB;
1581             BaseType_t xReturn;
1582
1583             traceENTER_xTaskCreateRestrictedAffinitySet( pxTaskDefinition, uxCoreAffinityMask, pxCreatedTask );
1584
1585             pxNewTCB = prvCreateRestrictedTask( pxTaskDefinition, pxCreatedTask );
1586
1587             if( pxNewTCB != NULL )
1588             {
1589                 /* Set the task's affinity before scheduling it. */
1590                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1591
1592                 prvAddNewTaskToReadyList( pxNewTCB );
1593
1594                 xReturn = pdPASS;
1595             }
1596             else
1597             {
1598                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1599             }
1600
1601             traceRETURN_xTaskCreateRestrictedAffinitySet( xReturn );
1602
1603             return xReturn;
1604         }
1605     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1606
1607
1608 #endif /* portUSING_MPU_WRAPPERS */
1609 /*-----------------------------------------------------------*/
1610
1611 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
    /* Allocate the TCB and stack for a new task and initialise both.  The
     * allocation order depends on portSTACK_GROWTH so that a stack overflow
     * cannot corrupt the TCB.  Returns the initialised TCB, or NULL if either
     * allocation failed (any partial allocation is freed before returning). */
    static TCB_t * prvCreateTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName,
                                  const configSTACK_DEPTH_TYPE usStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask )
    {
        TCB_t * pxNewTCB;

        /* If the stack grows down then allocate the stack then the TCB so the stack
         * does not grow into the TCB.  Likewise if the stack grows up then allocate
         * the TCB then the stack. */
        #if ( portSTACK_GROWTH > 0 )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends on
             * the implementation of the port malloc function and whether or not static
             * allocation is being used. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                /* Allocate space for the stack used by the task being created.
                 * The base of the stack memory stored in the TCB so the task can
                 * be deleted later if required. */
                /* MISRA Ref 11.5.1 [Malloc memory assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) );

                if( pxNewTCB->pxStack == NULL )
                {
                    /* Could not allocate the stack.  Delete the allocated TCB. */
                    vPortFree( pxNewTCB );
                    pxNewTCB = NULL;
                }
            }
        }
        #else /* portSTACK_GROWTH */
        {
            StackType_t * pxStack;

            /* Allocate space for the stack used by the task being created. */
            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) );

            if( pxStack != NULL )
            {
                /* Allocate space for the TCB. */
                /* MISRA Ref 11.5.1 [Malloc memory assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

                if( pxNewTCB != NULL )
                {
                    ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                    /* Store the stack location in the TCB. */
                    pxNewTCB->pxStack = pxStack;
                }
                else
                {
                    /* The stack cannot be used as the TCB was not created.  Free
                     * it again. */
                    vPortFreeStack( pxStack );
                }
            }
            else
            {
                pxNewTCB = NULL;
            }
        }
        #endif /* portSTACK_GROWTH */

        /* Both allocations succeeded - finish initialising the new task. */
        if( pxNewTCB != NULL )
        {
            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created dynamically in case it is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
        }

        return pxNewTCB;
    }
1708 /*-----------------------------------------------------------*/
1709
1710     BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
1711                             const char * const pcName,
1712                             const configSTACK_DEPTH_TYPE usStackDepth,
1713                             void * const pvParameters,
1714                             UBaseType_t uxPriority,
1715                             TaskHandle_t * const pxCreatedTask )
1716     {
1717         TCB_t * pxNewTCB;
1718         BaseType_t xReturn;
1719
1720         traceENTER_xTaskCreate( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1721
1722         pxNewTCB = prvCreateTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1723
1724         if( pxNewTCB != NULL )
1725         {
1726             #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1727             {
1728                 /* Set the task's affinity before scheduling it. */
1729                 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1730             }
1731             #endif
1732
1733             prvAddNewTaskToReadyList( pxNewTCB );
1734             xReturn = pdPASS;
1735         }
1736         else
1737         {
1738             xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1739         }
1740
1741         traceRETURN_xTaskCreate( xReturn );
1742
1743         return xReturn;
1744     }
1745 /*-----------------------------------------------------------*/
1746
1747     #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1748         BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode,
1749                                            const char * const pcName,
1750                                            const configSTACK_DEPTH_TYPE usStackDepth,
1751                                            void * const pvParameters,
1752                                            UBaseType_t uxPriority,
1753                                            UBaseType_t uxCoreAffinityMask,
1754                                            TaskHandle_t * const pxCreatedTask )
1755         {
1756             TCB_t * pxNewTCB;
1757             BaseType_t xReturn;
1758
1759             traceENTER_xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, uxCoreAffinityMask, pxCreatedTask );
1760
1761             pxNewTCB = prvCreateTask( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1762
1763             if( pxNewTCB != NULL )
1764             {
1765                 /* Set the task's affinity before scheduling it. */
1766                 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1767
1768                 prvAddNewTaskToReadyList( pxNewTCB );
1769                 xReturn = pdPASS;
1770             }
1771             else
1772             {
1773                 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1774             }
1775
1776             traceRETURN_xTaskCreateAffinitySet( xReturn );
1777
1778             return xReturn;
1779         }
1780     #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1781
1782 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1783 /*-----------------------------------------------------------*/
1784
1785 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
1786                                   const char * const pcName,
1787                                   const uint32_t ulStackDepth,
1788                                   void * const pvParameters,
1789                                   UBaseType_t uxPriority,
1790                                   TaskHandle_t * const pxCreatedTask,
1791                                   TCB_t * pxNewTCB,
1792                                   const MemoryRegion_t * const xRegions )
1793 {
1794     StackType_t * pxTopOfStack;
1795     UBaseType_t x;
1796
1797     #if ( portUSING_MPU_WRAPPERS == 1 )
1798         /* Should the task be created in privileged mode? */
1799         BaseType_t xRunPrivileged;
1800
1801         if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
1802         {
1803             xRunPrivileged = pdTRUE;
1804         }
1805         else
1806         {
1807             xRunPrivileged = pdFALSE;
1808         }
1809         uxPriority &= ~portPRIVILEGE_BIT;
1810     #endif /* portUSING_MPU_WRAPPERS == 1 */
1811
1812     /* Avoid dependency on memset() if it is not required. */
1813     #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
1814     {
1815         /* Fill the stack with a known value to assist debugging. */
1816         ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
1817     }
1818     #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
1819
1820     /* Calculate the top of stack address.  This depends on whether the stack
1821      * grows from high memory to low (as per the 80x86) or vice versa.
1822      * portSTACK_GROWTH is used to make the result positive or negative as required
1823      * by the port. */
1824     #if ( portSTACK_GROWTH < 0 )
1825     {
1826         pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
1827         pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
1828
1829         /* Check the alignment of the calculated top of stack is correct. */
1830         configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
1831
1832         #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
1833         {
1834             /* Also record the stack's high address, which may assist
1835              * debugging. */
1836             pxNewTCB->pxEndOfStack = pxTopOfStack;
1837         }
1838         #endif /* configRECORD_STACK_HIGH_ADDRESS */
1839     }
1840     #else /* portSTACK_GROWTH */
1841     {
1842         pxTopOfStack = pxNewTCB->pxStack;
1843         pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + portBYTE_ALIGNMENT_MASK ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
1844
1845         /* Check the alignment of the calculated top of stack is correct. */
1846         configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
1847
1848         /* The other extreme of the stack space is required if stack checking is
1849          * performed. */
1850         pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
1851     }
1852     #endif /* portSTACK_GROWTH */
1853
1854     /* Store the task name in the TCB. */
1855     if( pcName != NULL )
1856     {
1857         for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
1858         {
1859             pxNewTCB->pcTaskName[ x ] = pcName[ x ];
1860
1861             /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
1862              * configMAX_TASK_NAME_LEN characters just in case the memory after the
1863              * string is not accessible (extremely unlikely). */
1864             if( pcName[ x ] == ( char ) 0x00 )
1865             {
1866                 break;
1867             }
1868             else
1869             {
1870                 mtCOVERAGE_TEST_MARKER();
1871             }
1872         }
1873
1874         /* Ensure the name string is terminated in the case that the string length
1875          * was greater or equal to configMAX_TASK_NAME_LEN. */
1876         pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1U ] = '\0';
1877     }
1878     else
1879     {
1880         mtCOVERAGE_TEST_MARKER();
1881     }
1882
1883     /* This is used as an array index so must ensure it's not too large. */
1884     configASSERT( uxPriority < configMAX_PRIORITIES );
1885
1886     if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1887     {
1888         uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1889     }
1890     else
1891     {
1892         mtCOVERAGE_TEST_MARKER();
1893     }
1894
1895     pxNewTCB->uxPriority = uxPriority;
1896     #if ( configUSE_MUTEXES == 1 )
1897     {
1898         pxNewTCB->uxBasePriority = uxPriority;
1899     }
1900     #endif /* configUSE_MUTEXES */
1901
1902     vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
1903     vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
1904
1905     /* Set the pxNewTCB as a link back from the ListItem_t.  This is so we can get
1906      * back to  the containing TCB from a generic item in a list. */
1907     listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
1908
1909     /* Event lists are always in priority order. */
1910     listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority );
1911     listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
1912
1913     #if ( portUSING_MPU_WRAPPERS == 1 )
1914     {
1915         vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1916     }
1917     #else
1918     {
1919         /* Avoid compiler warning about unreferenced parameter. */
1920         ( void ) xRegions;
1921     }
1922     #endif
1923
1924     #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
1925     {
1926         /* Allocate and initialize memory for the task's TLS Block. */
1927         configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
1928     }
1929     #endif
1930
1931     /* Initialize the TCB stack to look as if the task was already running,
1932      * but had been interrupted by the scheduler.  The return address is set
1933      * to the start of the task function. Once the stack has been initialised
1934      * the top of stack variable is updated. */
1935     #if ( portUSING_MPU_WRAPPERS == 1 )
1936     {
1937         /* If the port has capability to detect stack overflow,
1938          * pass the stack end address to the stack initialization
1939          * function as well. */
1940         #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1941         {
1942             #if ( portSTACK_GROWTH < 0 )
1943             {
1944                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1945             }
1946             #else /* portSTACK_GROWTH */
1947             {
1948                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1949             }
1950             #endif /* portSTACK_GROWTH */
1951         }
1952         #else /* portHAS_STACK_OVERFLOW_CHECKING */
1953         {
1954             pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1955         }
1956         #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1957     }
1958     #else /* portUSING_MPU_WRAPPERS */
1959     {
1960         /* If the port has capability to detect stack overflow,
1961          * pass the stack end address to the stack initialization
1962          * function as well. */
1963         #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1964         {
1965             #if ( portSTACK_GROWTH < 0 )
1966             {
1967                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1968             }
1969             #else /* portSTACK_GROWTH */
1970             {
1971                 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1972             }
1973             #endif /* portSTACK_GROWTH */
1974         }
1975         #else /* portHAS_STACK_OVERFLOW_CHECKING */
1976         {
1977             pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1978         }
1979         #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1980     }
1981     #endif /* portUSING_MPU_WRAPPERS */
1982
1983     /* Initialize task state and task attributes. */
1984     #if ( configNUMBER_OF_CORES > 1 )
1985     {
1986         pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING;
1987
1988         /* Is this an idle task? */
1989         if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvPassiveIdleTask ) )
1990         {
1991             pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE;
1992         }
1993     }
1994     #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
1995
1996     if( pxCreatedTask != NULL )
1997     {
1998         /* Pass the handle out in an anonymous way.  The handle can be used to
1999          * change the created task's priority, delete the created task, etc.*/
2000         *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
2001     }
2002     else
2003     {
2004         mtCOVERAGE_TEST_MARKER();
2005     }
2006 }
2007 /*-----------------------------------------------------------*/
2008
2009 #if ( configNUMBER_OF_CORES == 1 )
2010
    /* Place a freshly initialised task on the ready list appropriate to its
     * priority and, when the scheduler is not yet running, track the highest
     * priority task created so far in pxCurrentTCB so it runs first.  If the
     * scheduler is already running, yield if the new task should preempt. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks++;

            if( pxCurrentTCB == NULL )
            {
                /* There are no other tasks, or all the other tasks are in
                 * the suspended state - make this the current task. */
                pxCurrentTCB = pxNewTCB;

                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required.  We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* If the scheduler is not already running, make this task the
                 * current task if it is the highest priority task to be created
                 * so far. */
                if( xSchedulerRunning == pdFALSE )
                {
                    if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
                    {
                        pxCurrentTCB = pxNewTCB;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Incremented so kernel aware debuggers can detect that the task
             * lists have changed. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            portSETUP_TCB( pxNewTCB );
        }
        taskEXIT_CRITICAL();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If the created task is of a higher priority than the current task
             * then it should run now. */
            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
2086
2087 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2088
    /* SMP variant: place a freshly initialised task on the appropriate ready
     * list.  Unlike the single-core version there is no pxCurrentTCB to set
     * here - core assignment of the initial idle tasks is handled in
     * prvCreateIdleTasks().  Yields (possibly on another core) if the new task
     * should preempt a running task. */
    static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
    {
        /* Ensure interrupts don't access the task lists while the lists are being
         * updated. */
        taskENTER_CRITICAL();
        {
            uxCurrentNumberOfTasks++;

            if( xSchedulerRunning == pdFALSE )
            {
                if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
                {
                    /* This is the first task to be created so do the preliminary
                     * initialisation required.  We will not recover if this call
                     * fails, but we will report the failure. */
                    prvInitialiseTaskLists();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* All the cores start with idle tasks before the SMP scheduler
                 * is running. Idle tasks are assigned to cores when they are
                 * created in prvCreateIdleTasks(). */
            }

            /* Incremented so kernel aware debuggers can detect that the task
             * lists have changed. */
            uxTaskNumber++;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
            #endif /* configUSE_TRACE_FACILITY */
            traceTASK_CREATE( pxNewTCB );

            prvAddTaskToReadyList( pxNewTCB );

            portSETUP_TCB( pxNewTCB );

            if( xSchedulerRunning != pdFALSE )
            {
                /* If the created task is of a higher priority than another
                 * currently running task and preemption is on then it should
                 * run now. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }
2144
2145 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2146 /*-----------------------------------------------------------*/
2147
2148 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
2149
2150     static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
2151                                                         size_t n )
2152     {
2153         size_t uxCharsWritten;
2154
2155         if( iSnprintfReturnValue < 0 )
2156         {
2157             /* Encoding error - Return 0 to indicate that nothing
2158              * was written to the buffer. */
2159             uxCharsWritten = 0;
2160         }
2161         else if( iSnprintfReturnValue >= ( int ) n )
2162         {
2163             /* This is the case when the supplied buffer is not
2164              * large to hold the generated string. Return the
2165              * number of characters actually written without
2166              * counting the terminating NULL character. */
2167             uxCharsWritten = n - 1U;
2168         }
2169         else
2170         {
2171             /* Complete string was written to the buffer. */
2172             uxCharsWritten = ( size_t ) iSnprintfReturnValue;
2173         }
2174
2175         return uxCharsWritten;
2176     }
2177
2178 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
2179 /*-----------------------------------------------------------*/
2180
2181 #if ( INCLUDE_vTaskDelete == 1 )
2182
    /* Remove a task from the scheduler's management.  Passing NULL deletes the
     * calling task.  A task that is still running (or scheduled to yield)
     * cannot be cleaned up immediately - it is parked on the termination list
     * for the idle task to free later; otherwise its TCB is freed here. */
    void vTaskDelete( TaskHandle_t xTaskToDelete )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskDelete( xTaskToDelete );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the calling task that is
             * being deleted. */
            pxTCB = prvGetTCBFromHandle( xTaskToDelete );

            /* Remove task from the ready/delayed list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* The list is now empty so the ready-priority bookkeeping can
                 * forget this priority level. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Increment the uxTaskNumber also so kernel aware debuggers can
             * detect that the task lists need re-generating.  This is done before
             * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
             * not return. */
            uxTaskNumber++;

            /* If the task is running (or yielding), we must add it to the
             * termination list so that an idle task can delete it when it is
             * no longer running. */
            if( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE )
            {
                /* A running task or a task which is scheduled to yield is being
                 * deleted. This cannot complete when the task is still running
                 * on a core, as a context switch to another task is required.
                 * Place the task in the termination list. The idle task will check
                 * the termination list and free up any memory allocated by the
                 * scheduler for the TCB and stack of the deleted task. */
                vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

                /* Increment the ucTasksDeleted variable so the idle task knows
                 * there is a task that has been deleted and that it should therefore
                 * check the xTasksWaitingTermination list. */
                ++uxDeletedTasksWaitingCleanUp;

                /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
                 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
                traceTASK_DELETE( pxTCB );

                /* The pre-delete hook is primarily for the Windows simulator,
                 * in which Windows specific clean up operations are performed,
                 * after which it is not possible to yield away from this task -
                 * hence xYieldPending is used to latch that a context switch is
                 * required. */
                #if ( configNUMBER_OF_CORES == 1 )
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
                #else
                    portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
                #endif
            }
            else
            {
                --uxCurrentNumberOfTasks;
                traceTASK_DELETE( pxTCB );

                /* Reset the next expected unblock time in case it referred to
                 * the task that has just been deleted. */
                prvResetNextTaskUnblockTime();
            }
        }

        /* NOTE: the critical section opened above is exited inside the
         * configNUMBER_OF_CORES conditional blocks below, at different points
         * for the single-core and SMP cases. */
        #if ( configNUMBER_OF_CORES == 1 )
        {
            taskEXIT_CRITICAL();

            /* If the task is not deleting itself, call prvDeleteTCB from outside of
             * critical section. If a task deletes itself, prvDeleteTCB is called
             * from prvCheckTasksWaitingTermination which is called from Idle task. */
            if( pxTCB != pxCurrentTCB )
            {
                prvDeleteTCB( pxTCB );
            }

            /* Force a reschedule if it is the currently running task that has just
             * been deleted. */
            if( xSchedulerRunning != pdFALSE )
            {
                if( pxTCB == pxCurrentTCB )
                {
                    configASSERT( uxSchedulerSuspended == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        {
            /* If a running task is not deleting itself, call prvDeleteTCB. If a running
             * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination
             * which is called from Idle task. */
            if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
            {
                prvDeleteTCB( pxTCB );
            }

            /* Force a reschedule if the task that has just been deleted was running. */
            if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
            {
                if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
                {
                    configASSERT( uxSchedulerSuspended == 0 );
                    vTaskYieldWithinAPI();
                }
                else
                {
                    /* The deleted task is running on another core - make that
                     * core switch away from it. */
                    prvYieldCore( pxTCB->xTaskRunState );
                }
            }

            taskEXIT_CRITICAL();
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskDelete();
    }
2322
2323 #endif /* INCLUDE_vTaskDelete */
2324 /*-----------------------------------------------------------*/
2325
2326 #if ( INCLUDE_xTaskDelayUntil == 1 )
2327
    /* Delay the calling task until an absolute time - *pxPreviousWakeTime plus
     * xTimeIncrement ticks - giving a fixed-frequency periodic wake-up that
     * does not drift with execution time.  *pxPreviousWakeTime is updated for
     * the next call.  Returns pdTRUE if the task actually delayed, pdFALSE if
     * the target wake time had already passed. */
    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                                const TickType_t xTimeIncrement )
    {
        TickType_t xTimeToWake;
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;

        traceENTER_xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );

        configASSERT( pxPreviousWakeTime );
        configASSERT( ( xTimeIncrement > 0U ) );

        vTaskSuspendAll();
        {
            /* Minor optimisation.  The tick count cannot change in this
             * block. */
            const TickType_t xConstTickCount = xTickCount;

            configASSERT( uxSchedulerSuspended == 1U );

            /* Generate the tick time at which the task wants to wake.  This
             * addition may intentionally wrap around - the overflow cases are
             * handled below. */
            xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

            if( xConstTickCount < *pxPreviousWakeTime )
            {
                /* The tick count has overflowed since this function was
                 * lasted called.  In this case the only time we should ever
                 * actually delay is if the wake time has also  overflowed,
                 * and the wake time is greater than the tick time.  When this
                 * is the case it is as if neither time had overflowed. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The tick time has not overflowed.  In this case we will
                 * delay if either the wake time has overflowed, and/or the
                 * tick time is less than the wake time. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Update the wake time ready for the next call. */
            *pxPreviousWakeTime = xTimeToWake;

            if( xShouldDelay != pdFALSE )
            {
                traceTASK_DELAY_UNTIL( xTimeToWake );

                /* prvAddCurrentTaskToDelayedList() needs the block time, not
                 * the time to wake, so subtract the current tick count. */
                prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so, we may
         * have put ourselves to sleep. */
        if( xAlreadyYielded == pdFALSE )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskDelayUntil( xShouldDelay );

        return xShouldDelay;
    }
2414
2415 #endif /* INCLUDE_xTaskDelayUntil */
2416 /*-----------------------------------------------------------*/
2417
2418 #if ( INCLUDE_vTaskDelay == 1 )
2419
2420     void vTaskDelay( const TickType_t xTicksToDelay )
2421     {
2422         BaseType_t xAlreadyYielded = pdFALSE;
2423
2424         traceENTER_vTaskDelay( xTicksToDelay );
2425
2426         /* A delay time of zero just forces a reschedule. */
2427         if( xTicksToDelay > ( TickType_t ) 0U )
2428         {
2429             vTaskSuspendAll();
2430             {
2431                 configASSERT( uxSchedulerSuspended == 1U );
2432
2433                 traceTASK_DELAY();
2434
2435                 /* A task that is removed from the event list while the
2436                  * scheduler is suspended will not get placed in the ready
2437                  * list or removed from the blocked list until the scheduler
2438                  * is resumed.
2439                  *
2440                  * This task cannot be in an event list as it is the currently
2441                  * executing task. */
2442                 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
2443             }
2444             xAlreadyYielded = xTaskResumeAll();
2445         }
2446         else
2447         {
2448             mtCOVERAGE_TEST_MARKER();
2449         }
2450
2451         /* Force a reschedule if xTaskResumeAll has not already done so, we may
2452          * have put ourselves to sleep. */
2453         if( xAlreadyYielded == pdFALSE )
2454         {
2455             taskYIELD_WITHIN_API();
2456         }
2457         else
2458         {
2459             mtCOVERAGE_TEST_MARKER();
2460         }
2461
2462         traceRETURN_vTaskDelay();
2463     }
2464
2465 #endif /* INCLUDE_vTaskDelay */
2466 /*-----------------------------------------------------------*/
2467
2468 #if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
2469
    /* Report the scheduler state (eRunning/eReady/eBlocked/eSuspended/eDeleted)
     * of the task referenced by xTask.  The state is derived from which kernel
     * list the task's state and event list items currently sit on. */
    eTaskState eTaskGetState( TaskHandle_t xTask )
    {
        eTaskState eReturn;
        List_t const * pxStateList;
        List_t const * pxEventList;
        List_t const * pxDelayedList;
        List_t const * pxOverflowedDelayedList;
        const TCB_t * const pxTCB = xTask;

        traceENTER_eTaskGetState( xTask );

        configASSERT( pxTCB );

        #if ( configNUMBER_OF_CORES == 1 )
            if( pxTCB == pxCurrentTCB )
            {
                /* The task calling this function is querying its own state. */
                eReturn = eRunning;
            }
            else
        #endif
        {
            /* Snapshot the list pointers atomically - they can otherwise
             * change under us via the tick or another task. */
            taskENTER_CRITICAL();
            {
                pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
                pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
                pxDelayedList = pxDelayedTaskList;
                pxOverflowedDelayedList = pxOverflowDelayedTaskList;
            }
            taskEXIT_CRITICAL();

            if( pxEventList == &xPendingReadyList )
            {
                /* The task has been placed on the pending ready list, so its
                 * state is eReady regardless of what list the task's state list
                 * item is currently placed on. */
                eReturn = eReady;
            }
            else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
            {
                /* The task being queried is referenced from one of the Blocked
                 * lists. */
                eReturn = eBlocked;
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
                else if( pxStateList == &xSuspendedTaskList )
                {
                    /* The task being queried is referenced from the suspended
                     * list.  Is it genuinely suspended or is it blocked
                     * indefinitely? */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                    {
                        #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                        {
                            BaseType_t x;

                            /* The task does not appear on the event list item of
                             * and of the RTOS objects, but could still be in the
                             * blocked state if it is waiting on its notification
                             * rather than waiting on an object.  If not, is
                             * suspended. */
                            eReturn = eSuspended;

                            for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                            {
                                if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                {
                                    eReturn = eBlocked;
                                    break;
                                }
                            }
                        }
                        #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        {
                            eReturn = eSuspended;
                        }
                        #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    }
                    else
                    {
                        eReturn = eBlocked;
                    }
                }
            #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

            #if ( INCLUDE_vTaskDelete == 1 )
                else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
                {
                    /* The task being queried is referenced from the deleted
                     * tasks list, or it is not referenced from any lists at
                     * all. */
                    eReturn = eDeleted;
                }
            #endif

            else
            {
                #if ( configNUMBER_OF_CORES == 1 )
                {
                    /* If the task is not in any other state, it must be in the
                     * Ready (including pending ready) state. */
                    eReturn = eReady;
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                    {
                        /* Is it actively running on a core? */
                        eReturn = eRunning;
                    }
                    else
                    {
                        /* If the task is not in any other state, it must be in the
                         * Ready (including pending ready) state. */
                        eReturn = eReady;
                    }
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }

        traceRETURN_eTaskGetState( eReturn );

        return eReturn;
    }
2596
2597 #endif /* INCLUDE_eTaskGetState */
2598 /*-----------------------------------------------------------*/
2599
2600 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2601
2602     UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
2603     {
2604         TCB_t const * pxTCB;
2605         UBaseType_t uxReturn;
2606
2607         traceENTER_uxTaskPriorityGet( xTask );
2608
2609         taskENTER_CRITICAL();
2610         {
2611             /* If null is passed in here then it is the priority of the task
2612              * that called uxTaskPriorityGet() that is being queried. */
2613             pxTCB = prvGetTCBFromHandle( xTask );
2614             uxReturn = pxTCB->uxPriority;
2615         }
2616         taskEXIT_CRITICAL();
2617
2618         traceRETURN_uxTaskPriorityGet( uxReturn );
2619
2620         return uxReturn;
2621     }
2622
2623 #endif /* INCLUDE_uxTaskPriorityGet */
2624 /*-----------------------------------------------------------*/
2625
2626 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2627
2628     UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
2629     {
2630         TCB_t const * pxTCB;
2631         UBaseType_t uxReturn;
2632         UBaseType_t uxSavedInterruptStatus;
2633
2634         traceENTER_uxTaskPriorityGetFromISR( xTask );
2635
2636         /* RTOS ports that support interrupt nesting have the concept of a
2637          * maximum  system call (or maximum API call) interrupt priority.
2638          * Interrupts that are  above the maximum system call priority are keep
2639          * permanently enabled, even when the RTOS kernel is in a critical section,
2640          * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
2641          * is defined in FreeRTOSConfig.h then
2642          * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2643          * failure if a FreeRTOS API function is called from an interrupt that has
2644          * been assigned a priority above the configured maximum system call
2645          * priority.  Only FreeRTOS functions that end in FromISR can be called
2646          * from interrupts  that have been assigned a priority at or (logically)
2647          * below the maximum system call interrupt priority.  FreeRTOS maintains a
2648          * separate interrupt safe API to ensure interrupt entry is as fast and as
2649          * simple as possible.  More information (albeit Cortex-M specific) is
2650          * provided on the following link:
2651          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2652         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2653
2654         uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2655         {
2656             /* If null is passed in here then it is the priority of the calling
2657              * task that is being queried. */
2658             pxTCB = prvGetTCBFromHandle( xTask );
2659             uxReturn = pxTCB->uxPriority;
2660         }
2661         taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2662
2663         traceRETURN_uxTaskPriorityGetFromISR( uxReturn );
2664
2665         return uxReturn;
2666     }
2667
2668 #endif /* INCLUDE_uxTaskPriorityGet */
2669 /*-----------------------------------------------------------*/
2670
2671 #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
2672
2673     UBaseType_t uxTaskBasePriorityGet( const TaskHandle_t xTask )
2674     {
2675         TCB_t const * pxTCB;
2676         UBaseType_t uxReturn;
2677
2678         traceENTER_uxTaskBasePriorityGet( xTask );
2679
2680         taskENTER_CRITICAL();
2681         {
2682             /* If null is passed in here then it is the base priority of the task
2683              * that called uxTaskBasePriorityGet() that is being queried. */
2684             pxTCB = prvGetTCBFromHandle( xTask );
2685             uxReturn = pxTCB->uxBasePriority;
2686         }
2687         taskEXIT_CRITICAL();
2688
2689         traceRETURN_uxTaskBasePriorityGet( uxReturn );
2690
2691         return uxReturn;
2692     }
2693
2694 #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
2695 /*-----------------------------------------------------------*/
2696
2697 #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) )
2698
2699     UBaseType_t uxTaskBasePriorityGetFromISR( const TaskHandle_t xTask )
2700     {
2701         TCB_t const * pxTCB;
2702         UBaseType_t uxReturn;
2703         UBaseType_t uxSavedInterruptStatus;
2704
2705         traceENTER_uxTaskBasePriorityGetFromISR( xTask );
2706
2707         /* RTOS ports that support interrupt nesting have the concept of a
2708          * maximum  system call (or maximum API call) interrupt priority.
2709          * Interrupts that are  above the maximum system call priority are keep
2710          * permanently enabled, even when the RTOS kernel is in a critical section,
2711          * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
2712          * is defined in FreeRTOSConfig.h then
2713          * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2714          * failure if a FreeRTOS API function is called from an interrupt that has
2715          * been assigned a priority above the configured maximum system call
2716          * priority.  Only FreeRTOS functions that end in FromISR can be called
2717          * from interrupts  that have been assigned a priority at or (logically)
2718          * below the maximum system call interrupt priority.  FreeRTOS maintains a
2719          * separate interrupt safe API to ensure interrupt entry is as fast and as
2720          * simple as possible.  More information (albeit Cortex-M specific) is
2721          * provided on the following link:
2722          * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2723         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2724
2725         uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2726         {
2727             /* If null is passed in here then it is the base priority of the calling
2728              * task that is being queried. */
2729             pxTCB = prvGetTCBFromHandle( xTask );
2730             uxReturn = pxTCB->uxBasePriority;
2731         }
2732         taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2733
2734         traceRETURN_uxTaskBasePriorityGetFromISR( uxReturn );
2735
2736         return uxReturn;
2737     }
2738
2739 #endif /* #if ( ( INCLUDE_uxTaskPriorityGet == 1 ) && ( configUSE_MUTEXES == 1 ) ) */
2740 /*-----------------------------------------------------------*/
2741
2742 #if ( INCLUDE_vTaskPrioritySet == 1 )
2743
    /* Change the priority of xTask (or of the calling task when xTask is
     * NULL) to uxNewPriority.  The new priority is clipped to
     * configMAX_PRIORITIES - 1 if it is out of range.  When mutexes are in
     * use the base priority is always updated, but the effective priority is
     * only changed if the task is not currently running with an inherited
     * (higher) priority.  A yield is requested when the change readies a
     * higher priority task, or lowers the priority of a running task. */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        #if ( configNUMBER_OF_CORES > 1 )
            /* SMP only: set when a raised-priority task may need to preempt a
             * lower priority task running on another core. */
            BaseType_t xYieldForTask = pdFALSE;
        #endif

        traceENTER_vTaskPrioritySet( xTask, uxNewPriority );

        configASSERT( uxNewPriority < configMAX_PRIORITIES );

        /* Ensure the new priority is valid. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            /* Compare against the base priority: with mutexes enabled the
             * effective priority may be temporarily inherited and must not be
             * confused with the priority the application set. */
            #if ( configUSE_MUTEXES == 1 )
            {
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than a running task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        if( pxTCB != pxCurrentTCB )
                        {
                            /* The priority of a task other than the currently
                             * running task is being raised.  Is the priority being
                             * raised above that of the running task? */
                            if( uxNewPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldRequired = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* The priority of the running task is being raised,
                             * but the running task must already be the highest
                             * priority task able to run so no yield is required. */
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* The priority of a task is being raised so
                         * perform a yield for this task later. */
                        xYieldForTask = pdTRUE;
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    /* Setting the priority of a running task down means
                     * there may now be another task of higher priority that
                     * is ready to execute. */
                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                        if( pxTCB->xPreemptionDisable == pdFALSE )
                    #endif
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority or the new priority
                     * is bigger than the inherited priority. */
                    if( ( pxTCB->uxBasePriority == pxTCB->uxPriority ) || ( uxNewPriority > pxTCB->uxPriority ) )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                {
                    /* Event lists are ordered by inverted priority, hence the
                     * configMAX_PRIORITIES - priority encoding. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list.  As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #else
                    {
                        /* It's possible that xYieldForTask was already set to pdTRUE because
                         * its priority is being raised. However, since it is not in a ready list
                         * we don't actually need to yield for it. */
                        xYieldForTask = pdFALSE;
                    }
                    #endif
                }

                if( xYieldRequired != pdFALSE )
                {
                    /* The running task priority is set down. Request the task to yield. */
                    taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    #if ( configNUMBER_OF_CORES > 1 )
                        if( xYieldForTask != pdFALSE )
                        {
                            /* The priority of the task is being raised. If a running
                             * task has priority lower than this task, it should yield
                             * for this task. */
                            taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                        }
                        else
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskPrioritySet();
    }
2952
2953 #endif /* INCLUDE_vTaskPrioritySet */
2954 /*-----------------------------------------------------------*/
2955
2956 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
    /* Set the mask of cores on which xTask (or the calling task when xTask is
     * NULL) is allowed to run.  If the task is currently running on a core
     * that the new mask excludes, that core is asked to yield.  Otherwise,
     * when preemption is enabled and the new mask opens up cores the task was
     * previously barred from, a scheduling pass is made to see if the task
     * should preempt a task running on one of those cores. */
    void vTaskCoreAffinitySet( const TaskHandle_t xTask,
                               UBaseType_t uxCoreAffinityMask )
    {
        TCB_t * pxTCB;
        BaseType_t xCoreID;
        UBaseType_t uxPrevCoreAffinityMask;

        #if ( configUSE_PREEMPTION == 1 )
            UBaseType_t uxPrevNotAllowedCores;
        #endif

        traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );

        taskENTER_CRITICAL();
        {
            /* A NULL handle selects the calling task. */
            pxTCB = prvGetTCBFromHandle( xTask );

            uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask;
            pxTCB->uxCoreAffinityMask = uxCoreAffinityMask;

            if( xSchedulerRunning != pdFALSE )
            {
                if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                {
                    xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;

                    /* If the task can no longer run on the core it was running,
                     * request the core to yield. */
                    if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U )
                    {
                        prvYieldCore( xCoreID );
                    }
                }
                else
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Calculate the cores on which this task was not allowed to
                         * run previously. */
                        uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                        /* Does the new core mask enables this task to run on any of the
                         * previously not allowed cores? If yes, check if this task can be
                         * scheduled on any of those cores. */
                        if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U )
                        {
                            prvYieldForTask( pxTCB );
                        }
                    }
                    #else /* #if( configUSE_PREEMPTION == 1 ) */
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                    #endif /* #if( configUSE_PREEMPTION == 1 ) */
                }
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskCoreAffinitySet();
    }
3018 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
3019 /*-----------------------------------------------------------*/
3020
3021 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
3022     UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask )
3023     {
3024         const TCB_t * pxTCB;
3025         UBaseType_t uxCoreAffinityMask;
3026
3027         traceENTER_vTaskCoreAffinityGet( xTask );
3028
3029         taskENTER_CRITICAL();
3030         {
3031             pxTCB = prvGetTCBFromHandle( xTask );
3032             uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
3033         }
3034         taskEXIT_CRITICAL();
3035
3036         traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
3037
3038         return uxCoreAffinityMask;
3039     }
3040 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
3041
3042 /*-----------------------------------------------------------*/
3043
3044 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
3045
3046     void vTaskPreemptionDisable( const TaskHandle_t xTask )
3047     {
3048         TCB_t * pxTCB;
3049
3050         traceENTER_vTaskPreemptionDisable( xTask );
3051
3052         taskENTER_CRITICAL();
3053         {
3054             pxTCB = prvGetTCBFromHandle( xTask );
3055
3056             pxTCB->xPreemptionDisable = pdTRUE;
3057         }
3058         taskEXIT_CRITICAL();
3059
3060         traceRETURN_vTaskPreemptionDisable();
3061     }
3062
3063 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
3064 /*-----------------------------------------------------------*/
3065
3066 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
3067
3068     void vTaskPreemptionEnable( const TaskHandle_t xTask )
3069     {
3070         TCB_t * pxTCB;
3071         BaseType_t xCoreID;
3072
3073         traceENTER_vTaskPreemptionEnable( xTask );
3074
3075         taskENTER_CRITICAL();
3076         {
3077             pxTCB = prvGetTCBFromHandle( xTask );
3078
3079             pxTCB->xPreemptionDisable = pdFALSE;
3080
3081             if( xSchedulerRunning != pdFALSE )
3082             {
3083                 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
3084                 {
3085                     xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
3086                     prvYieldCore( xCoreID );
3087                 }
3088             }
3089         }
3090         taskEXIT_CRITICAL();
3091
3092         traceRETURN_vTaskPreemptionEnable();
3093     }
3094
3095 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
3096 /*-----------------------------------------------------------*/
3097
3098 #if ( INCLUDE_vTaskSuspend == 1 )
3099
    /* Place xTaskToSuspend (or the calling task when NULL) into the Suspended
     * state: the task is removed from its ready/delayed list and from any
     * event list it is waiting on, then appended to xSuspendedTaskList.  Any
     * pending "waiting for notification" state is cleared because no
     * notification was received.  If the suspended task is the one currently
     * running, a yield is performed (or requested on the relevant core in
     * SMP builds); if the scheduler has not started, pxCurrentTCB is
     * re-pointed at another task instead. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;

        #if ( configNUMBER_OF_CORES > 1 )
            /* SMP only: core the task is running on (if any), captured before
             * the task's state is changed. */
            BaseType_t xTaskRunningOnCore;
        #endif

        traceENTER_vTaskSuspend( xTaskToSuspend );

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            #if ( configNUMBER_OF_CORES > 1 )
                xTaskRunningOnCore = pxTCB->xTaskRunState;
            #endif

            /* Remove task from the ready/delayed list and place in the
             * suspended list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* That was the last task at this priority - let the port
                 * level optimised task selection forget the priority. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
        }

        #if ( configNUMBER_OF_CORES == 1 )
        {
            /* Single core: the critical section can be dropped here; the
             * remaining work re-enters it only where required. */
            taskEXIT_CRITICAL();

            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                taskENTER_CRITICAL();
                {
                    prvResetNextTaskUnblockTime();
                }
                taskEXIT_CRITICAL();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( pxTCB == pxCurrentTCB )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    /* The current task has just been suspended. */
                    configASSERT( uxSchedulerSuspended == 0 );
                    portYIELD_WITHIN_API();
                }
                else
                {
                    /* The scheduler is not running, but the task that was pointed
                     * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                     * must be adjusted to point to a different task. */
                    if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks )
                    {
                        /* No other tasks are ready, so set pxCurrentTCB back to
                         * NULL so when the next task is created pxCurrentTCB will
                         * be set to point to it no matter what its relative priority
                         * is. */
                        pxCurrentTCB = NULL;
                    }
                    else
                    {
                        vTaskSwitchContext();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        {
            /* SMP: stay inside the critical section for the whole sequence. */
            if( xSchedulerRunning != pdFALSE )
            {
                /* Reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                prvResetNextTaskUnblockTime();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
            {
                if( xSchedulerRunning != pdFALSE )
                {
                    if( xTaskRunningOnCore == ( BaseType_t ) portGET_CORE_ID() )
                    {
                        /* The current task has just been suspended. */
                        configASSERT( uxSchedulerSuspended == 0 );
                        vTaskYieldWithinAPI();
                    }
                    else
                    {
                        prvYieldCore( xTaskRunningOnCore );
                    }
                }
                else
                {
                    /* This code path is not possible because only Idle tasks are
                     * assigned a core before the scheduler is started ( i.e.
                     * taskTASK_IS_RUNNING is only true for idle tasks before
                     * the scheduler is started ) and idle tasks cannot be
                     * suspended. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            taskEXIT_CRITICAL();
        }
        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

        traceRETURN_vTaskSuspend();
    }
3262
3263 #endif /* INCLUDE_vTaskSuspend */
3264 /*-----------------------------------------------------------*/
3265
3266 #if ( INCLUDE_vTaskSuspend == 1 )
3267
    /* Return pdTRUE only if xTask is genuinely in the Suspended state.  A
     * task in xSuspendedTaskList is NOT considered suspended when it has
     * already been readied from an ISR (its event list item is in
     * xPendingReadyList), when it is blocked on an event with an infinite
     * timeout (its event list item is in some event list), or when it is
     * blocked waiting for a task notification. */
    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
    {
        BaseType_t xReturn = pdFALSE;
        const TCB_t * const pxTCB = xTask;

        /* Accesses xPendingReadyList so must be called from a critical
         * section. */

        /* It does not make sense to check if the calling task is suspended. */
        configASSERT( xTask );

        /* Is the task being resumed actually in the suspended list? */
        if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
        {
            /* Has the task already been resumed from within an ISR? */
            if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
            {
                /* Is it in the suspended list because it is in the Suspended
                 * state, or because it is blocked with no timeout? */
                if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE )
                {
                    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                    {
                        BaseType_t x;

                        /* The task does not appear on the event list item of
                         * and of the RTOS objects, but could still be in the
                         * blocked state if it is waiting on its notification
                         * rather than waiting on an object.  If not, is
                         * suspended. */
                        xReturn = pdTRUE;

                        for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                        {
                            if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                            {
                                /* Waiting on a notification in at least one
                                 * array entry, so the task is blocked, not
                                 * suspended. */
                                xReturn = pdFALSE;
                                break;
                            }
                        }
                    }
                    #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    {
                        xReturn = pdTRUE;
                    }
                    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }
3332
3333 #endif /* INCLUDE_vTaskSuspend */
3334 /*-----------------------------------------------------------*/
3335
3336 #if ( INCLUDE_vTaskSuspend == 1 )
3337
    /* Move xTaskToResume from the Suspended state back to the Ready state.
     * Does nothing if the task is not genuinely suspended (see
     * prvTaskIsTaskSuspended()).  May trigger a yield so that the resumed
     * task can run immediately if it has a high enough priority. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        traceENTER_vTaskResume( xTaskToResume );

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        #if ( configNUMBER_OF_CORES == 1 )

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. */
            if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
        #else

            /* The parameter cannot be NULL as it is impossible to resume the
             * currently executing task. It is also impossible to resume a task
             * that is actively running on another core but it is not safe
             * to check their run state here. Therefore, we get into a critical
             * section and check if the task is actually suspended or not. */
            if( pxTCB != NULL )
        #endif
        {
            taskENTER_CRITICAL();
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* This yield may not cause the task just resumed to run,
                     * but will leave the lists in the correct state for the
                     * next yield. */
                    taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskResume();
    }
3392
3393 #endif /* INCLUDE_vTaskSuspend */
3394
3395 /*-----------------------------------------------------------*/
3396
3397 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
3398
    /* Resume a previously suspended task from an interrupt service routine.
     * Returns pdTRUE if resuming the task should result in a context switch,
     * in which case the ISR should request a yield before exiting using the
     * port specific mechanism (e.g. portYIELD_FROM_ISR()). */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskResumeFromISR( xTaskToResume );

        configASSERT( xTaskToResume );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Only act if the task is actually in the Suspended state. */
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Ready lists can be accessed so move the task from the
                         * suspended list to the ready list directly. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            xYieldRequired = pdTRUE;

                            /* Mark that a yield is pending in case the user is not
                             * using the return value to initiate a context switch
                             * from the ISR using the port specific portYIELD_FROM_ISR(). */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

                    /* The return value of uxListRemove() (the number of items
                     * remaining on the list) is not needed here. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) )
                {
                    /* In SMP builds the resumed task may need to preempt a task
                     * running on another core; report a yield requirement for
                     * this core if one was raised. */
                    prvYieldForTask( pxTCB );

                    if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
                    {
                        xYieldRequired = pdTRUE;
                    }
                }
                #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskResumeFromISR( xYieldRequired );

        return xYieldRequired;
    }
3489
3490 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
3491 /*-----------------------------------------------------------*/
3492
/* Create the idle task and, in SMP builds, one passive idle task for each
 * additional core, all at the lowest priority.  Returns pdPASS if every idle
 * task was created successfully, otherwise pdFAIL. */
static BaseType_t prvCreateIdleTasks( void )
{
    BaseType_t xReturn = pdPASS;
    BaseType_t xCoreID;
    char cIdleName[ configMAX_TASK_NAME_LEN ];
    TaskFunction_t pxIdleTaskFunction = NULL;
    BaseType_t xIdleTaskNameIndex;

    /* Copy the configured idle task name into a local buffer, stopping at
     * the NUL terminator.  On exit xIdleTaskNameIndex holds the index of the
     * terminator (or configMAX_TASK_NAME_LEN if the name filled the buffer),
     * which is reused below when appending a core number suffix. */
    for( xIdleTaskNameIndex = ( BaseType_t ) 0; xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN; xIdleTaskNameIndex++ )
    {
        cIdleName[ xIdleTaskNameIndex ] = configIDLE_TASK_NAME[ xIdleTaskNameIndex ];

        /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
         * configMAX_TASK_NAME_LEN characters just in case the memory after the
         * string is not accessible (extremely unlikely). */
        if( cIdleName[ xIdleTaskNameIndex ] == ( char ) 0x00 )
        {
            break;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    /* Add each idle task at the lowest priority. */
    for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
    {
        #if ( configNUMBER_OF_CORES == 1 )
        {
            pxIdleTaskFunction = prvIdleTask;
        }
        #else /* #if (  configNUMBER_OF_CORES == 1 ) */
        {
            /* In the FreeRTOS SMP, configNUMBER_OF_CORES - 1 passive idle tasks
             * are also created to ensure that each core has an idle task to
             * run when no other task is available to run. */
            if( xCoreID == 0 )
            {
                pxIdleTaskFunction = prvIdleTask;
            }
            else
            {
                pxIdleTaskFunction = prvPassiveIdleTask;
            }
        }
        #endif /* #if (  configNUMBER_OF_CORES == 1 ) */

        /* Update the idle task name with suffix to differentiate the idle tasks.
         * This function is not required in single core FreeRTOS since there is
         * only one idle task. */
        #if ( configNUMBER_OF_CORES > 1 )
        {
            /* Append the idle task number to the end of the name if there is space.
             * NOTE(review): the suffix is a single character ( xCoreID + '0' ),
             * which only yields a decimal digit while xCoreID <= 9 - cores 10
             * and above would receive non-numeric suffix characters; confirm
             * against the supported maximum core count. */
            if( xIdleTaskNameIndex < ( BaseType_t ) configMAX_TASK_NAME_LEN )
            {
                cIdleName[ xIdleTaskNameIndex ] = ( char ) ( xCoreID + '0' );

                /* And append a null character if there is space. */
                if( ( xIdleTaskNameIndex + 1 ) < ( BaseType_t ) configMAX_TASK_NAME_LEN )
                {
                    cIdleName[ xIdleTaskNameIndex + 1 ] = '\0';
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* if ( configNUMBER_OF_CORES > 1 ) */

        #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        {
            StaticTask_t * pxIdleTaskTCBBuffer = NULL;
            StackType_t * pxIdleTaskStackBuffer = NULL;
            uint32_t ulIdleTaskStackSize; /* Set by the application callback below. */

            /* The Idle task is created using user provided RAM - obtain the
             * address of the RAM then create the idle task. */
            #if ( configNUMBER_OF_CORES == 1 )
            {
                vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
            }
            #else
            {
                if( xCoreID == 0 )
                {
                    vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
                }
                else
                {
                    vApplicationGetPassiveIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize, xCoreID - 1 );
                }
            }
            #endif /* if ( configNUMBER_OF_CORES == 1 ) */
            xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( pxIdleTaskFunction,
                                                             cIdleName,
                                                             ulIdleTaskStackSize,
                                                             ( void * ) NULL,
                                                             portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                                             pxIdleTaskStackBuffer,
                                                             pxIdleTaskTCBBuffer );

            if( xIdleTaskHandles[ xCoreID ] != NULL )
            {
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
        {
            /* The Idle task is being created using dynamically allocated RAM. */
            xReturn = xTaskCreate( pxIdleTaskFunction,
                                   cIdleName,
                                   configMINIMAL_STACK_SIZE,
                                   ( void * ) NULL,
                                   portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                   &xIdleTaskHandles[ xCoreID ] );
        }
        #endif /* configSUPPORT_STATIC_ALLOCATION */

        /* Break the loop if any of the idle task is failed to be created. */
        if( xReturn == pdFAIL )
        {
            break;
        }
        else
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                mtCOVERAGE_TEST_MARKER();
            }
            #else
            {
                /* Assign idle task to each core before SMP scheduler is running. */
                xIdleTaskHandles[ xCoreID ]->xTaskRunState = xCoreID;
                pxCurrentTCBs[ xCoreID ] = xIdleTaskHandles[ xCoreID ];
            }
            #endif
        }
    }

    return xReturn;
}
3644
3645 /*-----------------------------------------------------------*/
3646
/* Start the RTOS kernel:  create the idle task(s) and, when timers are
 * enabled, the timer service task, then hand control to the port specific
 * scheduler start routine.  In most ports xPortStartScheduler() never
 * returns, so this function only runs to completion if startup failed or the
 * port supports ending the scheduler. */
void vTaskStartScheduler( void )
{
    BaseType_t xReturn;

    traceENTER_vTaskStartScheduler();

    #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
    {
        /* Sanity check that the UBaseType_t must have greater than or equal to
         * the number of bits as confNUMBER_OF_CORES. */
        configASSERT( ( sizeof( UBaseType_t ) * taskBITS_PER_BYTE ) >= configNUMBER_OF_CORES );
    }
    #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) */

    xReturn = prvCreateIdleTasks();

    #if ( configUSE_TIMERS == 1 )
    {
        /* Only attempt to create the timer task if the idle task(s) were
         * created successfully. */
        if( xReturn == pdPASS )
        {
            xReturn = xTimerCreateTimerTask();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_TIMERS */

    if( xReturn == pdPASS )
    {
        /* freertos_tasks_c_additions_init() should only be called if the user
         * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
         * the only macro called by the function. */
        #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        {
            freertos_tasks_c_additions_init();
        }
        #endif

        /* Interrupts are turned off here, to ensure a tick does not occur
         * before or during the call to xPortStartScheduler().  The stacks of
         * the created tasks contain a status word with interrupts switched on
         * so interrupts will automatically get re-enabled when the first task
         * starts to run. */
        portDISABLE_INTERRUPTS();

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Switch C-Runtime's TLS Block to point to the TLS
             * block specific to the task that will run first. */
            configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
        }
        #endif

        xNextTaskUnblockTime = portMAX_DELAY;
        xSchedulerRunning = pdTRUE;
        xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

        /* If configGENERATE_RUN_TIME_STATS is defined then the following
         * macro must be defined to configure the timer/counter used to generate
         * the run time counter time base.   NOTE:  If configGENERATE_RUN_TIME_STATS
         * is set to 0 and the following line fails to build then ensure you do not
         * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
         * FreeRTOSConfig.h file. */
        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

        traceTASK_SWITCHED_IN();

        /* Setting up the timer tick is hardware specific and thus in the
         * portable interface. */

        /* The return value for xPortStartScheduler is not required
         * hence using a void datatype. */
        ( void ) xPortStartScheduler();

        /* In most cases, xPortStartScheduler() will not return. If it
         * returns pdTRUE then there was not enough heap memory available
         * to create either the Idle or the Timer task. If it returned
         * pdFALSE, then the application called xTaskEndScheduler().
         * Most ports don't implement xTaskEndScheduler() as there is
         * nothing to return to. */
    }
    else
    {
        /* This line will only be reached if the kernel could not be started,
         * because there was not enough FreeRTOS heap to create the idle task
         * or the timer task. */
        configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
    }

    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
     * meaning xIdleTaskHandles are not used anywhere else. */
    ( void ) xIdleTaskHandles;

    /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
     * from getting optimized out as it is no longer used by the kernel. */
    ( void ) uxTopUsedPriority;

    traceRETURN_vTaskStartScheduler();
}
3748 /*-----------------------------------------------------------*/
3749
/* Stop the RTOS kernel:  disable the scheduler interrupts, clear the
 * scheduler-running flag, then call the port specific end routine so the
 * original (pre-scheduler) ISRs can be restored where the port supports
 * that. */
void vTaskEndScheduler( void )
{
    traceENTER_vTaskEndScheduler();

    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary.  The port
     * layer must ensure interrupts enable bit is left in the correct state. */
    portDISABLE_INTERRUPTS();
    xSchedulerRunning = pdFALSE;
    vPortEndScheduler();

    traceRETURN_vTaskEndScheduler();
}
3763 /*----------------------------------------------------------*/
3764
/* Suspend the scheduler.  Calls nest:  the scheduler does not resume until
 * xTaskResumeAll() has been called the same number of times that
 * vTaskSuspendAll() has been called.  While suspended, uxSchedulerSuspended
 * is non-zero and context switches are held off. */
void vTaskSuspendAll( void )
{
    traceENTER_vTaskSuspendAll();

    #if ( configNUMBER_OF_CORES == 1 )
    {
        /* A critical section is not required as the variable is of type
         * BaseType_t.  Please read Richard Barry's reply in the following link to a
         * post in the FreeRTOS support forum before reporting this as a bug! -
         * https://goo.gl/wu4acr */

        /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
         * do not otherwise exhibit real time behaviour. */
        portSOFTWARE_BARRIER();

        /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
         * is used to allow calls to vTaskSuspendAll() to nest. */
        ++uxSchedulerSuspended;

        /* Enforces ordering for ports and optimised compilers that may otherwise place
         * the above increment elsewhere. */
        portMEMORY_BARRIER();
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        UBaseType_t ulState;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
             * We must disable interrupts before we grab the locks in the event that this task is
             * interrupted and switches context before incrementing uxSchedulerSuspended.
             * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
             * uxSchedulerSuspended since that will prevent context switches. */
            ulState = portSET_INTERRUPT_MASK();

            /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
             * do not otherwise exhibit real time behaviour. */
            portSOFTWARE_BARRIER();

            portGET_TASK_LOCK();

            /* uxSchedulerSuspended is increased after prvCheckForRunStateChange.  The
             * purpose is to prevent altering the variable while FromISR APIs are
             * reading it. */
            if( uxSchedulerSuspended == 0U )
            {
                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    prvCheckForRunStateChange();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            portGET_ISR_LOCK();

            /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
             * is used to allow calls to vTaskSuspendAll() to nest. */
            ++uxSchedulerSuspended;
            portRELEASE_ISR_LOCK();

            /* The task lock is intentionally kept held here; it is released by
             * the matching xTaskResumeAll() call. */
            portCLEAR_INTERRUPT_MASK( ulState );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskSuspendAll();
}
3847
3848 /*----------------------------------------------------------*/
3849
3850 #if ( configUSE_TICKLESS_IDLE != 0 )
3851
3852     static TickType_t prvGetExpectedIdleTime( void )
3853     {
3854         TickType_t xReturn;
3855         UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
3856
3857         /* uxHigherPriorityReadyTasks takes care of the case where
3858          * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
3859          * task that are in the Ready state, even though the idle task is
3860          * running. */
3861         #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
3862         {
3863             if( uxTopReadyPriority > tskIDLE_PRIORITY )
3864             {
3865                 uxHigherPriorityReadyTasks = pdTRUE;
3866             }
3867         }
3868         #else
3869         {
3870             const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
3871
3872             /* When port optimised task selection is used the uxTopReadyPriority
3873              * variable is used as a bit map.  If bits other than the least
3874              * significant bit are set then there are tasks that have a priority
3875              * above the idle priority that are in the Ready state.  This takes
3876              * care of the case where the co-operative scheduler is in use. */
3877             if( uxTopReadyPriority > uxLeastSignificantBit )
3878             {
3879                 uxHigherPriorityReadyTasks = pdTRUE;
3880             }
3881         }
3882         #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */
3883
3884         if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
3885         {
3886             xReturn = 0;
3887         }
3888         else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1U )
3889         {
3890             /* There are other idle priority tasks in the ready state.  If
3891              * time slicing is used then the very next tick interrupt must be
3892              * processed. */
3893             xReturn = 0;
3894         }
3895         else if( uxHigherPriorityReadyTasks != pdFALSE )
3896         {
3897             /* There are tasks in the Ready state that have a priority above the
3898              * idle priority.  This path can only be reached if
3899              * configUSE_PREEMPTION is 0. */
3900             xReturn = 0;
3901         }
3902         else
3903         {
3904             xReturn = xNextTaskUnblockTime;
3905             xReturn -= xTickCount;
3906         }
3907
3908         return xReturn;
3909     }
3910
3911 #endif /* configUSE_TICKLESS_IDLE */
3912 /*----------------------------------------------------------*/
3913
/* Resume the scheduler after a matching call to vTaskSuspendAll().  Moves any
 * tasks readied while the scheduler was suspended from the pending ready list
 * onto their ready lists, processes any pended ticks, and returns pdTRUE if a
 * yield was requested within this function (so the caller need not perform a
 * context switch itself). */
BaseType_t xTaskResumeAll( void )
{
    TCB_t * pxTCB = NULL;
    BaseType_t xAlreadyYielded = pdFALSE;

    traceENTER_xTaskResumeAll();

    /* In SMP builds the whole body is skipped if the scheduler has not yet
     * been started; in single core builds the body always executes. */
    #if ( configNUMBER_OF_CORES > 1 )
        if( xSchedulerRunning != pdFALSE )
    #endif
    {
        /* It is possible that an ISR caused a task to be removed from an event
         * list while the scheduler was suspended.  If this was the case then the
         * removed task will have been added to the xPendingReadyList.  Once the
         * scheduler has been resumed it is safe to move all the pending ready
         * tasks from this list into their appropriate ready list. */
        taskENTER_CRITICAL();
        {
            BaseType_t xCoreID;
            xCoreID = ( BaseType_t ) portGET_CORE_ID();

            /* If uxSchedulerSuspended is zero then this function does not match a
             * previous call to vTaskSuspendAll(). */
            configASSERT( uxSchedulerSuspended != 0U );

            --uxSchedulerSuspended;
            /* Release the task lock taken by vTaskSuspendAll() (presumably a
             * no-op in single core builds - confirm against portmacro.h). */
            portRELEASE_TASK_LOCK();

            if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
            {
                if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
                {
                    /* Move any readied tasks from the pending list into the
                     * appropriate ready list. */
                    while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) );
                        listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                        portMEMORY_BARRIER();
                        listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                        prvAddTaskToReadyList( pxTCB );

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            /* If the moved task has a priority higher than the current
                             * task then a yield must be performed. */
                            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                            {
                                xYieldPendings[ xCoreID ] = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                        {
                            /* All appropriate tasks yield at the moment a task is added to xPendingReadyList.
                             * If the current core yielded then vTaskSwitchContext() has already been called
                             * which sets xYieldPendings for the current core to pdTRUE. */
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }

                    /* pxTCB is only non-NULL if at least one task was moved
                     * from the pending ready list above. */
                    if( pxTCB != NULL )
                    {
                        /* A task was unblocked while the scheduler was suspended,
                         * which may have prevented the next unblock time from being
                         * re-calculated, in which case re-calculate it now.  Mainly
                         * important for low power tickless implementations, where
                         * this can prevent an unnecessary exit from low power
                         * state. */
                        prvResetNextTaskUnblockTime();
                    }

                    /* If any ticks occurred while the scheduler was suspended then
                     * they should be processed now.  This ensures the tick count does
                     * not slip, and that any delayed tasks are resumed at the correct
                     * time.
                     *
                     * It should be safe to call xTaskIncrementTick here from any core
                     * since we are in a critical section and xTaskIncrementTick itself
                     * protects itself within a critical section. Suspending the scheduler
                     * from any core causes xTaskIncrementTick to increment uxPendedCounts. */
                    {
                        TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */

                        if( xPendedCounts > ( TickType_t ) 0U )
                        {
                            do
                            {
                                if( xTaskIncrementTick() != pdFALSE )
                                {
                                    /* Other cores are interrupted from
                                     * within xTaskIncrementTick(). */
                                    xYieldPendings[ xCoreID ] = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }

                                --xPendedCounts;
                            } while( xPendedCounts > ( TickType_t ) 0U );

                            xPendedTicks = 0;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    if( xYieldPendings[ xCoreID ] != pdFALSE )
                    {
                        /* Report to the caller that a yield happened (or is
                         * being requested) here so the caller does not need
                         * to yield again. */
                        #if ( configUSE_PREEMPTION != 0 )
                        {
                            xAlreadyYielded = pdTRUE;
                        }
                        #endif /* #if ( configUSE_PREEMPTION != 0 ) */

                        #if ( configNUMBER_OF_CORES == 1 )
                        {
                            taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxCurrentTCB );
                        }
                        #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();
    }

    traceRETURN_xTaskResumeAll( xAlreadyYielded );

    return xAlreadyYielded;
}
4062 /*-----------------------------------------------------------*/
4063
4064 TickType_t xTaskGetTickCount( void )
4065 {
4066     TickType_t xTicks;
4067
4068     traceENTER_xTaskGetTickCount();
4069
4070     /* Critical section required if running on a 16 bit processor. */
4071     portTICK_TYPE_ENTER_CRITICAL();
4072     {
4073         xTicks = xTickCount;
4074     }
4075     portTICK_TYPE_EXIT_CRITICAL();
4076
4077     traceRETURN_xTaskGetTickCount( xTicks );
4078
4079     return xTicks;
4080 }
4081 /*-----------------------------------------------------------*/
4082
4083 TickType_t xTaskGetTickCountFromISR( void )
4084 {
4085     TickType_t xReturn;
4086     UBaseType_t uxSavedInterruptStatus;
4087
4088     traceENTER_xTaskGetTickCountFromISR();
4089
4090     /* RTOS ports that support interrupt nesting have the concept of a maximum
4091      * system call (or maximum API call) interrupt priority.  Interrupts that are
4092      * above the maximum system call priority are kept permanently enabled, even
4093      * when the RTOS kernel is in a critical section, but cannot make any calls to
4094      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
4095      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4096      * failure if a FreeRTOS API function is called from an interrupt that has been
4097      * assigned a priority above the configured maximum system call priority.
4098      * Only FreeRTOS functions that end in FromISR can be called from interrupts
4099      * that have been assigned a priority at or (logically) below the maximum
4100      * system call  interrupt priority.  FreeRTOS maintains a separate interrupt
4101      * safe API to ensure interrupt entry is as fast and as simple as possible.
4102      * More information (albeit Cortex-M specific) is provided on the following
4103      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
4104     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4105
4106     uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
4107     {
4108         xReturn = xTickCount;
4109     }
4110     portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
4111
4112     traceRETURN_xTaskGetTickCountFromISR( xReturn );
4113
4114     return xReturn;
4115 }
4116 /*-----------------------------------------------------------*/
4117
/* Return the current value of uxCurrentNumberOfTasks - the number of tasks
 * currently under the control of the kernel. */
UBaseType_t uxTaskGetNumberOfTasks( void )
{
    traceENTER_uxTaskGetNumberOfTasks();

    /* A critical section is not required because the variables are of type
     * BaseType_t. */
    traceRETURN_uxTaskGetNumberOfTasks( uxCurrentNumberOfTasks );

    return uxCurrentNumberOfTasks;
}
4128 /*-----------------------------------------------------------*/
4129
4130 char * pcTaskGetName( TaskHandle_t xTaskToQuery )
4131 {
4132     TCB_t * pxTCB;
4133
4134     traceENTER_pcTaskGetName( xTaskToQuery );
4135
4136     /* If null is passed in here then the name of the calling task is being
4137      * queried. */
4138     pxTCB = prvGetTCBFromHandle( xTaskToQuery );
4139     configASSERT( pxTCB );
4140
4141     traceRETURN_pcTaskGetName( &( pxTCB->pcTaskName[ 0 ] ) );
4142
4143     return &( pxTCB->pcTaskName[ 0 ] );
4144 }
4145 /*-----------------------------------------------------------*/
4146
#if ( INCLUDE_xTaskGetHandle == 1 )

    #if ( configNUMBER_OF_CORES == 1 )

        /* Search pxList for a task whose name exactly matches pcNameToQuery.
         *
         * pxList        - the task list to search (a ready, delayed, suspended
         *                 or termination list).
         * pcNameToQuery - NUL terminated name to look for.
         *
         * Returns the matching task's TCB, or NULL if no task in the list has
         * the queried name.  Called with the scheduler suspended, so the list
         * cannot change while it is being walked. */
        static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
                                                         const char pcNameToQuery[] )
        {
            TCB_t * pxNextTCB;
            TCB_t * pxFirstTCB;
            TCB_t * pxReturn = NULL;
            UBaseType_t x;
            char cNextChar;
            BaseType_t xBreakLoop;

            /* This function is called with the scheduler suspended. */

            if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
            {
                /* MISRA Ref 11.5.3 [Void pointer assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                /* listGET_OWNER_OF_NEXT_ENTRY() advances the list's internal
                 * index as a side effect.  Remember the first task returned so
                 * the do/while loop below can detect when the whole (circular)
                 * list has been visited. */
                listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

                do
                {
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );

                    /* Check each character in the name looking for a match or
                     * mismatch. */
                    xBreakLoop = pdFALSE;

                    for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
                    {
                        cNextChar = pxNextTCB->pcTaskName[ x ];

                        if( cNextChar != pcNameToQuery[ x ] )
                        {
                            /* Characters didn't match. */
                            xBreakLoop = pdTRUE;
                        }
                        else if( cNextChar == ( char ) 0x00 )
                        {
                            /* Both strings terminated, a match must have been
                             * found. */
                            pxReturn = pxNextTCB;
                            xBreakLoop = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        if( xBreakLoop != pdFALSE )
                        {
                            break;
                        }
                    }

                    if( pxReturn != NULL )
                    {
                        /* The handle has been found. */
                        break;
                    }
                } while( pxNextTCB != pxFirstTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            return pxReturn;
        }
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

        /* Multi-core variant of the name search above with an identical
         * contract: returns the TCB of the task in pxList whose name matches
         * pcNameToQuery, or NULL when there is no match.  This version walks
         * the list with an explicit item pointer instead of
         * listGET_OWNER_OF_NEXT_ENTRY(), so the list's internal index is not
         * modified - NOTE(review): presumably to avoid disturbing the shared
         * index when multiple cores are in use; confirm. */
        static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
                                                         const char pcNameToQuery[] )
        {
            TCB_t * pxReturn = NULL;
            UBaseType_t x;
            char cNextChar;
            BaseType_t xBreakLoop;
            const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList );
            ListItem_t * pxIterator;

            /* This function is called with the scheduler suspended. */

            if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
            {
                for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
                {
                    /* MISRA Ref 11.5.3 [Void pointer assignment] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                    /* coverity[misra_c_2012_rule_11_5_violation] */
                    TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );

                    /* Check each character in the name looking for a match or
                     * mismatch. */
                    xBreakLoop = pdFALSE;

                    for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
                    {
                        cNextChar = pxTCB->pcTaskName[ x ];

                        if( cNextChar != pcNameToQuery[ x ] )
                        {
                            /* Characters didn't match. */
                            xBreakLoop = pdTRUE;
                        }
                        else if( cNextChar == ( char ) 0x00 )
                        {
                            /* Both strings terminated, a match must have been
                             * found. */
                            pxReturn = pxTCB;
                            xBreakLoop = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        if( xBreakLoop != pdFALSE )
                        {
                            break;
                        }
                    }

                    if( pxReturn != NULL )
                    {
                        /* The handle has been found. */
                        break;
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            return pxReturn;
        }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* INCLUDE_xTaskGetHandle */
4291 /*-----------------------------------------------------------*/
4292
#if ( INCLUDE_xTaskGetHandle == 1 )

    TaskHandle_t xTaskGetHandle( const char * pcNameToQuery )
    {
        UBaseType_t uxPriority;
        TCB_t * pxFoundTCB = NULL;

        traceENTER_xTaskGetHandle( pcNameToQuery );

        /* The kernel stores at most configMAX_TASK_NAME_LEN - 1 characters of
         * a task's name, so a longer query string can never match. */
        configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

        vTaskSuspendAll();
        {
            /* Walk the ready lists, highest priority first. */
            for( uxPriority = ( UBaseType_t ) configMAX_PRIORITIES; uxPriority > ( UBaseType_t ) tskIDLE_PRIORITY; )
            {
                uxPriority--;
                pxFoundTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxPriority ] ), pcNameToQuery );

                if( pxFoundTCB != NULL )
                {
                    /* A task with the queried name was found. */
                    break;
                }
            }

            /* Not ready - try both delayed lists. */
            if( pxFoundTCB == NULL )
            {
                pxFoundTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
            }

            if( pxFoundTCB == NULL )
            {
                pxFoundTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
            {
                if( pxFoundTCB == NULL )
                {
                    /* Try the tasks that are currently suspended. */
                    pxFoundTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
                }
            }
            #endif

            #if ( INCLUDE_vTaskDelete == 1 )
            {
                if( pxFoundTCB == NULL )
                {
                    /* Try tasks that have been deleted but whose memory has
                     * not yet been reclaimed. */
                    pxFoundTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
                }
            }
            #endif
        }
        ( void ) xTaskResumeAll();

        traceRETURN_xTaskGetHandle( pxFoundTCB );

        return pxFoundTCB;
    }

#endif /* INCLUDE_xTaskGetHandle */
4359 /*-----------------------------------------------------------*/
4360
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
                                      StackType_t ** ppuxStackBuffer,
                                      StaticTask_t ** ppxTaskBuffer )
    {
        TCB_t * pxTCB;
        BaseType_t xResult;

        traceENTER_xTaskGetStaticBuffers( xTask, ppuxStackBuffer, ppxTaskBuffer );

        configASSERT( ppuxStackBuffer != NULL );
        configASSERT( ppxTaskBuffer != NULL );

        /* A NULL handle refers to the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
        {
            /* Report only the buffers that were actually supplied statically
             * by the application when the task was created. */
            switch( pxTCB->ucStaticallyAllocated )
            {
                case tskSTATICALLY_ALLOCATED_STACK_AND_TCB:
                    *ppuxStackBuffer = pxTCB->pxStack;
                    /* MISRA Ref 11.3.1 [Misaligned access] */
                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
                    /* coverity[misra_c_2012_rule_11_3_violation] */
                    *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
                    xResult = pdTRUE;
                    break;

                case tskSTATICALLY_ALLOCATED_STACK_ONLY:
                    *ppuxStackBuffer = pxTCB->pxStack;
                    *ppxTaskBuffer = NULL;
                    xResult = pdTRUE;
                    break;

                default:
                    /* The task was dynamically allocated - there are no
                     * static buffers to return. */
                    xResult = pdFALSE;
                    break;
            }
        }
        #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
        {
            /* Only static allocation is possible, so both buffers must have
             * been supplied by the application. */
            *ppuxStackBuffer = pxTCB->pxStack;
            *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
            xResult = pdTRUE;
        }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */

        traceRETURN_xTaskGetStaticBuffers( xResult );

        return xResult;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
4413 /*-----------------------------------------------------------*/
4414
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Populate pxTaskStatusArray with one TaskStatus_t entry per task in the
     * system (tasks in the Ready, Blocked, Suspended and
     * deleted-awaiting-cleanup states, per the INCLUDE_ settings).
     *
     * pxTaskStatusArray - caller supplied array to fill.
     * uxArraySize       - number of elements in pxTaskStatusArray.
     * pulTotalRunTime   - if not NULL, receives the total run time counter
     *                     value, or 0 when run time stats are not generated.
     *
     * Returns the number of entries populated; 0 when the supplied array is
     * too small to hold an entry for every task. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
    {
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        traceENTER_uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );

        /* Suspend the scheduler so the task lists do not change while they
         * are being read. */
        vTaskSuspendAll();
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state. */
                do
                {
                    uxQueue--;
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ) );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY );

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state. */
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ) );
                uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ) );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ) );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ) );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                /* The array is too small - uxTask stays 0, signalling failure
                 * to the caller. */
                mtCOVERAGE_TEST_MARKER();
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_uxTaskGetSystemState( uxTask );

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
4492 /*----------------------------------------------------------*/
4493
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )

    #if ( configNUMBER_OF_CORES == 1 )
        TaskHandle_t xTaskGetIdleTaskHandle( void )
        {
            TaskHandle_t xIdleHandle;

            traceENTER_xTaskGetIdleTaskHandle();

            xIdleHandle = xIdleTaskHandles[ 0 ];

            /* The handle is NULL until the scheduler has been started, so
             * calling this function before then is an error. */
            configASSERT( ( xIdleHandle != NULL ) );

            traceRETURN_xTaskGetIdleTaskHandle( xIdleHandle );

            return xIdleHandle;
        }
    #endif /* if ( configNUMBER_OF_CORES == 1 ) */

    TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID )
    {
        TaskHandle_t xIdleHandle;

        traceENTER_xTaskGetIdleTaskHandleForCore( xCoreID );

        /* Reject out of range core IDs before indexing the handle array. */
        configASSERT( taskVALID_CORE_ID( xCoreID ) == pdTRUE );

        xIdleHandle = xIdleTaskHandles[ xCoreID ];

        /* The handle is NULL until the scheduler has been started, so calling
         * this function before then is an error. */
        configASSERT( ( xIdleHandle != NULL ) );

        traceRETURN_xTaskGetIdleTaskHandleForCore( xIdleHandle );

        return xIdleHandle;
    }

#endif /* INCLUDE_xTaskGetIdleTaskHandle */
4528 /*----------------------------------------------------------*/
4529
4530 /* This conditional compilation should use inequality to 0, not equality to 1.
4531  * This is to ensure vTaskStepTick() is available when user defined low power mode
4532  * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
4533  * 1. */
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Advance the kernel's tick count after a period during which the tick
     * interrupt was suppressed (tickless idle).
     *
     * xTicksToJump - the number of tick periods that passed while the tick
     *                interrupt was stopped.
     *
     * The jump must not step past xNextTaskUnblockTime (asserted below);
     * landing exactly on it additionally requires the scheduler to be
     * suspended.  Note this does *not* invoke the tick hook for each stepped
     * tick. */
    void vTaskStepTick( TickType_t xTicksToJump )
    {
        TickType_t xUpdatedTickCount;

        traceENTER_vTaskStepTick( xTicksToJump );

        /* Correct the tick count value after a period during which the tick
         * was suppressed.  Note this does *not* call the tick hook function for
         * each stepped tick. */
        xUpdatedTickCount = xTickCount + xTicksToJump;
        /* The low power implementation must not have slept past the time a
         * task is due to be unblocked. */
        configASSERT( xUpdatedTickCount <= xNextTaskUnblockTime );

        if( xUpdatedTickCount == xNextTaskUnblockTime )
        {
            /* Arrange for xTickCount to reach xNextTaskUnblockTime in
             * xTaskIncrementTick() when the scheduler resumes.  This ensures
             * that any delayed tasks are resumed at the correct time. */
            configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
            configASSERT( xTicksToJump != ( TickType_t ) 0 );

            /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
            taskENTER_CRITICAL();
            {
                xPendedTicks++;
            }
            taskEXIT_CRITICAL();
            /* One tick has been moved into xPendedTicks, so step the tick
             * count by one less. */
            xTicksToJump--;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        xTickCount += xTicksToJump;

        traceINCREASE_TICK_COUNT( xTicksToJump );
        traceRETURN_vTaskStepTick();
    }

#endif /* configUSE_TICKLESS_IDLE */
4576 /*----------------------------------------------------------*/
4577
/* Simulate the passing of xTicksToCatchUp tick periods, for example after the
 * application has kept interrupts (and therefore the tick) disabled for a
 * period.
 *
 * xTicksToCatchUp - number of tick periods to add.
 *
 * Returns the value of xTaskResumeAll(), i.e. whether catching up the ticks
 * resulted in a context switch. */
BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
    BaseType_t xYieldOccurred;

    traceENTER_xTaskCatchUpTicks( xTicksToCatchUp );

    /* Must not be called with the scheduler suspended as the implementation
     * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
    configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );

    /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
     * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
    vTaskSuspendAll();

    /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
    taskENTER_CRITICAL();
    {
        xPendedTicks += xTicksToCatchUp;
    }
    taskEXIT_CRITICAL();
    /* Resuming the scheduler processes the pended ticks. */
    xYieldOccurred = xTaskResumeAll();

    traceRETURN_xTaskCatchUpTicks( xYieldOccurred );

    return xYieldOccurred;
}
4604 /*----------------------------------------------------------*/
4605
#if ( INCLUDE_xTaskAbortDelay == 1 )

    /* Force xTask out of the Blocked state before its block time expires.
     *
     * xTask - handle of the task to unblock.  Must not be NULL (asserted).
     *
     * Returns pdPASS if the task was in the Blocked state and has been moved
     * to the Ready state, or pdFAIL if the task was not blocked. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        traceENTER_xTaskAbortDelay( xTask );

        configASSERT( pxTCB );

        /* Suspend the scheduler so the state lists cannot change under us. */
        vTaskSuspendAll();
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list.  An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also?  If so remove it from
                 * the event list too.  Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used. */
                taskENTER_CRITICAL();
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL();

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        /* Preemption is on, but a context switch should only be
                         * performed if the unblocked task has a priority that is
                         * higher than the currently executing task. */
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            /* Pend the yield to be performed when the scheduler
                             * is unsuspended. */
                            xYieldPendings[ 0 ] = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        /* Let the SMP scheduler decide whether any core should
                         * yield to the newly readied task. */
                        taskENTER_CRITICAL();
                        {
                            prvYieldForTask( pxTCB );
                        }
                        taskEXIT_CRITICAL();
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                #endif /* #if ( configUSE_PREEMPTION == 1 ) */
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        ( void ) xTaskResumeAll();

        traceRETURN_xTaskAbortDelay( xReturn );

        return xReturn;
    }

#endif /* INCLUDE_xTaskAbortDelay */
4700 /*----------------------------------------------------------*/
4701
4702 BaseType_t xTaskIncrementTick( void )
4703 {
4704     TCB_t * pxTCB;
4705     TickType_t xItemValue;
4706     BaseType_t xSwitchRequired = pdFALSE;
4707
4708     #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
4709     BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
4710     #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
4711
4712     traceENTER_xTaskIncrementTick();
4713
4714     /* Called by the portable layer each time a tick interrupt occurs.
4715      * Increments the tick then checks to see if the new tick value will cause any
4716      * tasks to be unblocked. */
4717     traceTASK_INCREMENT_TICK( xTickCount );
4718
4719     /* Tick increment should occur on every kernel timer event. Core 0 has the
4720      * responsibility to increment the tick, or increment the pended ticks if the
4721      * scheduler is suspended.  If pended ticks is greater than zero, the core that
4722      * calls xTaskResumeAll has the responsibility to increment the tick. */
4723     if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
4724     {
4725         /* Minor optimisation.  The tick count cannot change in this
4726          * block. */
4727         const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
4728
4729         /* Increment the RTOS tick, switching the delayed and overflowed
4730          * delayed lists if it wraps to 0. */
4731         xTickCount = xConstTickCount;
4732
4733         if( xConstTickCount == ( TickType_t ) 0U )
4734         {
4735             taskSWITCH_DELAYED_LISTS();
4736         }
4737         else
4738         {
4739             mtCOVERAGE_TEST_MARKER();
4740         }
4741
4742         /* See if this tick has made a timeout expire.  Tasks are stored in
4743          * the  queue in the order of their wake time - meaning once one task
4744          * has been found whose block time has not expired there is no need to
4745          * look any further down the list. */
4746         if( xConstTickCount >= xNextTaskUnblockTime )
4747         {
4748             for( ; ; )
4749             {
4750                 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4751                 {
4752                     /* The delayed list is empty.  Set xNextTaskUnblockTime
4753                      * to the maximum possible value so it is extremely
4754                      * unlikely that the
4755                      * if( xTickCount >= xNextTaskUnblockTime ) test will pass
4756                      * next time through. */
4757                     xNextTaskUnblockTime = portMAX_DELAY;
4758                     break;
4759                 }
4760                 else
4761                 {
4762                     /* The delayed list is not empty, get the value of the
4763                      * item at the head of the delayed list.  This is the time
4764                      * at which the task at the head of the delayed list must
4765                      * be removed from the Blocked state. */
4766                     /* MISRA Ref 11.5.3 [Void pointer assignment] */
4767                     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
4768                     /* coverity[misra_c_2012_rule_11_5_violation] */
4769                     pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList );
4770                     xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
4771
4772                     if( xConstTickCount < xItemValue )
4773                     {
4774                         /* It is not time to unblock this item yet, but the
4775                          * item value is the time at which the task at the head
4776                          * of the blocked list must be removed from the Blocked
4777                          * state -  so record the item value in
4778                          * xNextTaskUnblockTime. */
4779                         xNextTaskUnblockTime = xItemValue;
4780                         break;
4781                     }
4782                     else
4783                     {
4784                         mtCOVERAGE_TEST_MARKER();
4785                     }
4786
4787                     /* It is time to remove the item from the Blocked state. */
4788                     listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
4789
4790                     /* Is the task waiting on an event also?  If so remove
4791                      * it from the event list. */
4792                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
4793                     {
4794                         listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
4795                     }
4796                     else
4797                     {
4798                         mtCOVERAGE_TEST_MARKER();
4799                     }
4800
4801                     /* Place the unblocked task into the appropriate ready
4802                      * list. */
4803                     prvAddTaskToReadyList( pxTCB );
4804
4805                     /* A task being unblocked cannot cause an immediate
4806                      * context switch if preemption is turned off. */
4807                     #if ( configUSE_PREEMPTION == 1 )
4808                     {
4809                         #if ( configNUMBER_OF_CORES == 1 )
4810                         {
4811                             /* Preemption is on, but a context switch should
4812                              * only be performed if the unblocked task's
4813                              * priority is higher than the currently executing
4814                              * task.
4815                              * The case of equal priority tasks sharing
4816                              * processing time (which happens when both
4817                              * preemption and time slicing are on) is
4818                              * handled below.*/
4819                             if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4820                             {
4821                                 xSwitchRequired = pdTRUE;
4822                             }
4823                             else
4824                             {
4825                                 mtCOVERAGE_TEST_MARKER();
4826                             }
4827                         }
4828                         #else /* #if( configNUMBER_OF_CORES == 1 ) */
4829                         {
4830                             prvYieldForTask( pxTCB );
4831                         }
4832                         #endif /* #if( configNUMBER_OF_CORES == 1 ) */
4833                     }
4834                     #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4835                 }
4836             }
4837         }
4838
4839         /* Tasks of equal priority to the currently running task will share
4840          * processing time (time slice) if preemption is on, and the application
4841          * writer has not explicitly turned time slicing off. */
4842         #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
4843         {
4844             #if ( configNUMBER_OF_CORES == 1 )
4845             {
4846                 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U )
4847                 {
4848                     xSwitchRequired = pdTRUE;
4849                 }
4850                 else
4851                 {
4852                     mtCOVERAGE_TEST_MARKER();
4853                 }
4854             }
4855             #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4856             {
4857                 BaseType_t xCoreID;
4858
4859                 for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ )
4860                 {
4861                     if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1U )
4862                     {
4863                         xYieldRequiredForCore[ xCoreID ] = pdTRUE;
4864                     }
4865                     else
4866                     {
4867                         mtCOVERAGE_TEST_MARKER();
4868                     }
4869                 }
4870             }
4871             #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4872         }
4873         #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
4874
4875         #if ( configUSE_TICK_HOOK == 1 )
4876         {
4877             /* Guard against the tick hook being called when the pended tick
4878              * count is being unwound (when the scheduler is being unlocked). */
4879             if( xPendedTicks == ( TickType_t ) 0 )
4880             {
4881                 vApplicationTickHook();
4882             }
4883             else
4884             {
4885                 mtCOVERAGE_TEST_MARKER();
4886             }
4887         }
4888         #endif /* configUSE_TICK_HOOK */
4889
4890         #if ( configUSE_PREEMPTION == 1 )
4891         {
4892             #if ( configNUMBER_OF_CORES == 1 )
4893             {
4894                 /* For single core the core ID is always 0. */
4895                 if( xYieldPendings[ 0 ] != pdFALSE )
4896                 {
4897                     xSwitchRequired = pdTRUE;
4898                 }
4899                 else
4900                 {
4901                     mtCOVERAGE_TEST_MARKER();
4902                 }
4903             }
4904             #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4905             {
4906                 BaseType_t xCoreID, xCurrentCoreID;
4907                 xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID();
4908
4909                 for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
4910                 {
4911                     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
4912                         if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
4913                     #endif
4914                     {
4915                         if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) )
4916                         {
4917                             if( xCoreID == xCurrentCoreID )
4918                             {
4919                                 xSwitchRequired = pdTRUE;
4920                             }
4921                             else
4922                             {
4923                                 prvYieldCore( xCoreID );
4924                             }
4925                         }
4926                         else
4927                         {
4928                             mtCOVERAGE_TEST_MARKER();
4929                         }
4930                     }
4931                 }
4932             }
4933             #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4934         }
4935         #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4936     }
4937     else
4938     {
4939         ++xPendedTicks;
4940
4941         /* The tick hook gets called at regular intervals, even if the
4942          * scheduler is locked. */
4943         #if ( configUSE_TICK_HOOK == 1 )
4944         {
4945             vApplicationTickHook();
4946         }
4947         #endif
4948     }
4949
4950     traceRETURN_xTaskIncrementTick( xSwitchRequired );
4951
4952     return xSwitchRequired;
4953 }
4954 /*-----------------------------------------------------------*/
4955
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

    void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
                                     TaskHookFunction_t pxHookFunction )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskSetApplicationTaskTag( xTask, pxHookFunction );

        /* A NULL handle selects the calling task's own TCB. */
        pxTCB = ( xTask == NULL ) ? ( TCB_t * ) pxCurrentTCB : ( TCB_t * ) xTask;

        /* Store the hook function pointer inside a critical section because
         * the tag can also be read from an interrupt context. */
        taskENTER_CRITICAL();
        pxTCB->pxTaskTag = pxHookFunction;
        taskEXIT_CRITICAL();

        traceRETURN_vTaskSetApplicationTaskTag();
    }

#endif /* configUSE_APPLICATION_TASK_TAG */
4988 /*-----------------------------------------------------------*/
4989
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

    TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        TaskHookFunction_t xReturn;

        traceENTER_xTaskGetApplicationTaskTag( xTask );

        /* If xTask is NULL then return the calling task's hook. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Read the hook function from the TCB.  A critical section is required
         * as the value can be written from an interrupt. */
        taskENTER_CRITICAL();
        {
            xReturn = pxTCB->pxTaskTag;
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGetApplicationTaskTag( xReturn );

        return xReturn;
    }

#endif /* configUSE_APPLICATION_TASK_TAG */
5016 /*-----------------------------------------------------------*/
5017
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

    TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        TaskHookFunction_t xReturn;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskGetApplicationTaskTagFromISR( xTask );

        /* If xTask is NULL then return the calling task's hook. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Read the hook function from the TCB.  An ISR-safe critical section
         * is used because this variant may be called from interrupt context;
         * the previous interrupt mask is restored on exit. */
        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            xReturn = pxTCB->pxTaskTag;
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn );

        return xReturn;
    }

#endif /* configUSE_APPLICATION_TASK_TAG */
5045 /*-----------------------------------------------------------*/
5046
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

    BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
                                             void * pvParameter )
    {
        TCB_t * pxTCB;
        BaseType_t xResult = pdFAIL;

        traceENTER_xTaskCallApplicationTaskHook( xTask, pvParameter );

        /* A NULL handle means "call the hook of the currently running task". */
        pxTCB = ( xTask == NULL ) ? ( TCB_t * ) pxCurrentTCB : ( TCB_t * ) xTask;

        /* Only invoke the hook when one has been registered; otherwise the
         * pdFAIL default is returned. */
        if( pxTCB->pxTaskTag != NULL )
        {
            xResult = pxTCB->pxTaskTag( pvParameter );
        }

        traceRETURN_xTaskCallApplicationTaskHook( xResult );

        return xResult;
    }

#endif /* configUSE_APPLICATION_TASK_TAG */
5082 /*-----------------------------------------------------------*/
5083
#if ( configNUMBER_OF_CORES == 1 )

    /* Select the highest priority ready task as the new pxCurrentTCB.  If the
     * scheduler is suspended the switch is not performed; instead a pending
     * yield is latched in xYieldPendings[ 0 ] so the switch happens when the
     * scheduler is resumed.  Kernel internal - not part of the public API. */
    void vTaskSwitchContext( void )
    {
        traceENTER_vTaskSwitchContext();

        if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
        {
            /* The scheduler is currently suspended - do not allow a context
             * switch. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xYieldPendings[ 0 ] = pdFALSE;
            traceTASK_SWITCHED_OUT();

            #if ( configGENERATE_RUN_TIME_STATS == 1 )
            {
                #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                    portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] );
                #else
                    ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE();
                #endif

                /* Add the amount of time the task has been running to the
                 * accumulated time so far.  The time the task started running was
                 * stored in ulTaskSwitchedInTime.  Note that there is no overflow
                 * protection here so count values are only valid until the timer
                 * overflows.  The guard against negative values is to protect
                 * against suspect run time stat counter implementations - which
                 * are provided by the application, not the kernel. */
                if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] )
                {
                    pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ];
            }
            #endif /* configGENERATE_RUN_TIME_STATS */

            /* Check for stack overflow, if configured. */
            taskCHECK_FOR_STACK_OVERFLOW();

            /* Before the currently running task is switched out, save its errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
            }
            #endif

            /* Select a new task to run using either the generic C or port
             * optimised asm code. */
            /* MISRA Ref 11.5.3 [Void pointer assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            taskSELECT_HIGHEST_PRIORITY_TASK();
            traceTASK_SWITCHED_IN();

            /* Macro to inject port specific behaviour immediately after
             * switching tasks, such as setting an end of stack watchpoint
             * or reconfiguring the MPU. */
            portTASK_SWITCH_HOOK( pxCurrentTCB );

            /* After the new task is switched in, update the global errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
            }
            #endif

            #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
            {
                /* Switch C-Runtime's TLS Block to point to the TLS
                 * Block specific to this task. */
                configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
            }
            #endif
        }

        traceRETURN_vTaskSwitchContext();
    }
#else /* if ( configNUMBER_OF_CORES == 1 ) */

    /* SMP variant: switch out the task currently running on core xCoreID and
     * select a replacement from the ready lists.  Both kernel spin locks are
     * held for the duration (task lock first, then ISR lock) and must not be
     * held by the caller. */
    void vTaskSwitchContext( BaseType_t xCoreID )
    {
        traceENTER_vTaskSwitchContext();

        /* Acquire both locks:
         * - The ISR lock protects the ready list from simultaneous access by
         *   both other ISRs and tasks.
         * - We also take the task lock to pause here in case another core has
         *   suspended the scheduler. We don't want to simply set xYieldPending
         *   and move on if another core suspended the scheduler. We should only
         *   do that if the current core has suspended the scheduler. */

        portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
        portGET_ISR_LOCK();
        {
            /* vTaskSwitchContext() must never be called from within a critical section.
             * This is not necessarily true for single core FreeRTOS, but it is for this
             * SMP port. */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );

            if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
            {
                /* The scheduler is currently suspended - do not allow a context
                 * switch. */
                xYieldPendings[ xCoreID ] = pdTRUE;
            }
            else
            {
                xYieldPendings[ xCoreID ] = pdFALSE;
                traceTASK_SWITCHED_OUT();

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                        portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] );
                    #else
                        ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE();
                    #endif

                    /* Add the amount of time the task has been running to the
                     * accumulated time so far.  The time the task started running was
                     * stored in ulTaskSwitchedInTime.  Note that there is no overflow
                     * protection here so count values are only valid until the timer
                     * overflows.  The guard against negative values is to protect
                     * against suspect run time stat counter implementations - which
                     * are provided by the application, not the kernel. */
                    if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] )
                    {
                        pxCurrentTCBs[ xCoreID ]->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ];
                }
                #endif /* configGENERATE_RUN_TIME_STATS */

                /* Check for stack overflow, if configured. */
                taskCHECK_FOR_STACK_OVERFLOW();

                /* Before the currently running task is switched out, save its errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    pxCurrentTCBs[ xCoreID ]->iTaskErrno = FreeRTOS_errno;
                }
                #endif

                /* Select a new task to run. */
                taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID );
                traceTASK_SWITCHED_IN();

                /* Macro to inject port specific behaviour immediately after
                 * switching tasks, such as setting an end of stack watchpoint
                 * or reconfiguring the MPU. */
                /* NOTE(review): this uses portGET_CORE_ID() rather than xCoreID -
                 * presumably they are always equal here; confirm at call sites. */
                portTASK_SWITCH_HOOK( pxCurrentTCBs[ portGET_CORE_ID() ] );

                /* After the new task is switched in, update the global errno. */
                #if ( configUSE_POSIX_ERRNO == 1 )
                {
                    FreeRTOS_errno = pxCurrentTCBs[ xCoreID ]->iTaskErrno;
                }
                #endif

                #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
                {
                    /* Switch C-Runtime's TLS Block to point to the TLS
                     * Block specific to this task. */
                    configSET_TLS_BLOCK( pxCurrentTCBs[ xCoreID ]->xTLSBlock );
                }
                #endif
            }
        }
        portRELEASE_ISR_LOCK();
        portRELEASE_TASK_LOCK();

        traceRETURN_vTaskSwitchContext();
    }
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
5270 /*-----------------------------------------------------------*/
5271
void vTaskPlaceOnEventList( List_t * const pxEventList,
                            const TickType_t xTicksToWait )
{
    traceENTER_vTaskPlaceOnEventList( pxEventList, xTicksToWait );

    configASSERT( pxEventList );

    /* THIS FUNCTION MUST BE CALLED WITH THE
     * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */

    /* Place the event list item of the TCB in the appropriate event list.
     * This is placed in the list in priority order so the highest priority task
     * is the first to be woken by the event.
     *
     * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
     * Normally, the xItemValue of a TCB's ListItem_t members is:
     *      xItemValue = ( configMAX_PRIORITIES - uxPriority )
     * Therefore, the event list is sorted in descending priority order.
     *
     * The queue that contains the event list is locked, preventing
     * simultaneous access from interrupts. */
    vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );

    /* Block the calling task until the event fires or xTicksToWait elapses.
     * The pdTRUE argument allows a portMAX_DELAY wait to be treated as an
     * indefinite block inside prvAddCurrentTaskToDelayedList(). */
    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );

    traceRETURN_vTaskPlaceOnEventList();
}
5299 /*-----------------------------------------------------------*/
5300
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
                                     const TickType_t xItemValue,
                                     const TickType_t xTicksToWait )
{
    traceENTER_vTaskPlaceOnUnorderedEventList( pxEventList, xItemValue, xTicksToWait );

    configASSERT( pxEventList );

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
     * the event groups implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the item value in the event list item.  It is safe to access the
     * event list item here as interrupts won't access the event list item of a
     * task that is not in the Blocked state.  The IN_USE bit marks the value
     * as holding event data rather than the task's priority. */
    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Place the event list item of the TCB at the end of the appropriate event
     * list (unordered - no priority sorting, unlike vTaskPlaceOnEventList()).
     * It is safe to access the event list here because it is part of an
     * event group implementation - and interrupts don't access event groups
     * directly (instead they access them indirectly by pending function calls to
     * the task level). */
    listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );

    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );

    traceRETURN_vTaskPlaceOnUnorderedEventList();
}
5329 /*-----------------------------------------------------------*/
5330
#if ( configUSE_TIMERS == 1 )

    void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
                                          TickType_t xTicksToWait,
                                          const BaseType_t xWaitIndefinitely )
    {
        traceENTER_vTaskPlaceOnEventListRestricted( pxEventList, xTicksToWait, xWaitIndefinitely );

        configASSERT( pxEventList );

        /* This function should not be called by application code hence the
         * 'Restricted' in its name.  It is not part of the public API.  It is
         * designed for use by kernel code, and has special calling requirements -
         * it should be called with the scheduler suspended. */


        /* Place the event list item of the TCB in the appropriate event list.
         * In this case it is assumed that this is the only task that is going to
         * be waiting on this event list, so the faster vListInsertEnd() function
         * can be used in place of vListInsert. */
        listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );

        /* If the task should block indefinitely then set the block time to a
         * value that will be recognised as an indefinite delay inside the
         * prvAddCurrentTaskToDelayedList() function. */
        if( xWaitIndefinitely != pdFALSE )
        {
            xTicksToWait = portMAX_DELAY;
        }

        traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
        prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );

        traceRETURN_vTaskPlaceOnEventListRestricted();
    }

#endif /* configUSE_TIMERS */
5368 /*-----------------------------------------------------------*/
5369
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
    TCB_t * pxUnblockedTCB;
    BaseType_t xReturn;

    traceENTER_xTaskRemoveFromEventList( pxEventList );

    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
     * called from a critical section within an ISR. */

    /* The event list is sorted in priority order, so the first in the list can
     * be removed as it is known to be the highest priority.  Remove the TCB from
     * the delayed list, and add it to the ready list.
     *
     * If an event is for a queue that is locked then this function will never
     * get called - the lock count on the queue will get modified instead.  This
     * means exclusive access to the event list is guaranteed here.
     *
     * This function assumes that a check has already been made to ensure that
     * pxEventList is not empty. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        /* Scheduler running: move the task straight to the ready list. */
        listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
        prvAddTaskToReadyList( pxUnblockedTCB );

        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            /* If a task is blocked on a kernel object then xNextTaskUnblockTime
             * might be set to the blocked task's time out time.  If the task is
             * unblocked for a reason other than a timeout xNextTaskUnblockTime is
             * normally left unchanged, because it is automatically reset to a new
             * value when the tick count equals xNextTaskUnblockTime.  However if
             * tickless idling is used it might be more important to enter sleep mode
             * at the earliest possible time - so reset xNextTaskUnblockTime here to
             * ensure it is updated at the earliest possible time. */
            prvResetNextTaskUnblockTime();
        }
        #endif
    }
    else
    {
        /* The delayed and ready lists cannot be accessed, so hold this task
         * pending until the scheduler is resumed. */
        listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
    }

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* Return true if the task removed from the event list has a higher
             * priority than the calling task.  This allows the calling task to know if
             * it should force a context switch now. */
            xReturn = pdTRUE;

            /* Mark that a yield is pending in case the user is not using the
             * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
            xYieldPendings[ 0 ] = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        /* SMP: pdTRUE is returned only when the unblocked task should preempt
         * the task running on THIS core; other cores are requested to yield
         * directly by prvYieldForTask(). */
        xReturn = pdFALSE;

        #if ( configUSE_PREEMPTION == 1 )
        {
            prvYieldForTask( pxUnblockedTCB );

            if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
            {
                xReturn = pdTRUE;
            }
        }
        #endif /* #if ( configUSE_PREEMPTION == 1 ) */
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_xTaskRemoveFromEventList( xReturn );
    return xReturn;
}
5461 /*-----------------------------------------------------------*/
5462
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue )
{
    TCB_t * pxUnblockedTCB;

    traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue );

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
     * the event flags implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the new item value in the event list. */
    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Remove the event list item from the event flag.  Interrupts do not
     * access event flags. */
    /* MISRA Ref 11.5.3 [Void pointer assignment] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem );
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( pxEventListItem );

    #if ( configUSE_TICKLESS_IDLE != 0 )
    {
        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
         * might be set to the blocked task's time out time.  If the task is
         * unblocked for a reason other than a timeout xNextTaskUnblockTime is
         * normally left unchanged, because it is automatically reset to a new
         * value when the tick count equals xNextTaskUnblockTime.  However if
         * tickless idling is used it might be more important to enter sleep mode
         * at the earliest possible time - so reset xNextTaskUnblockTime here to
         * ensure it is updated at the earliest possible time. */
        prvResetNextTaskUnblockTime();
    }
    #endif

    /* Remove the task from the delayed list and add it to the ready list.  The
     * scheduler is suspended so interrupts will not be accessing the ready
     * lists. */
    listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
    prvAddTaskToReadyList( pxUnblockedTCB );

    #if ( configNUMBER_OF_CORES == 1 )
    {
        if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
        {
            /* The unblocked task has a priority above that of the calling task, so
             * a context switch is required.  This function is called with the
             * scheduler suspended so xYieldPending is set so the context switch
             * occurs immediately that the scheduler is resumed (unsuspended). */
            xYieldPendings[ 0 ] = pdTRUE;
        }
    }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
    {
        #if ( configUSE_PREEMPTION == 1 )
        {
            /* SMP: let the scheduler decide which core (if any) should yield
             * to the newly readied task. */
            taskENTER_CRITICAL();
            {
                prvYieldForTask( pxUnblockedTCB );
            }
            taskEXIT_CRITICAL();
        }
        #endif
    }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

    traceRETURN_vTaskRemoveFromUnorderedEventList();
}
5533 /*-----------------------------------------------------------*/
5534
5535 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
5536 {
5537     traceENTER_vTaskSetTimeOutState( pxTimeOut );
5538
5539     configASSERT( pxTimeOut );
5540     taskENTER_CRITICAL();
5541     {
5542         pxTimeOut->xOverflowCount = xNumOfOverflows;
5543         pxTimeOut->xTimeOnEntering = xTickCount;
5544     }
5545     taskEXIT_CRITICAL();
5546
5547     traceRETURN_vTaskSetTimeOutState();
5548 }
5549 /*-----------------------------------------------------------*/
5550
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
    traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );

    /* For internal use only as it does not use a critical section.  Records
     * the current overflow count and tick count as the timeout reference
     * point (presumably callers already guarantee the tick cannot advance
     * between the two reads - confirm at call sites). */
    pxTimeOut->xOverflowCount = xNumOfOverflows;
    pxTimeOut->xTimeOnEntering = xTickCount;

    traceRETURN_vTaskInternalSetTimeOutState();
}
5561 /*-----------------------------------------------------------*/
5562
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    traceENTER_xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL();
    {
        /* Minor optimisation.  The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        /* NOTE: the #if blocks below splice optional conditions into a single
         * if/else-if chain - the trailing "else" of each block binds to the
         * unconditional "if" that follows. */
        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result. */
                pxCurrentTCB->ucDelayAborted = pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) )
        {
            /* The tick count has overflowed since vTaskSetTimeOutState() was
             * called, yet it is still numerically greater than or equal to the
             * time recorded on entry.  The count must therefore have wrapped
             * all the way around and passed the entry time again, so more than
             * the requested number of ticks has elapsed - time out. */
            xReturn = pdTRUE;
            *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait )
        {
            /* Not a genuine timeout. Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            /* The full block time has elapsed. */
            *pxTicksToWait = ( TickType_t ) 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    traceRETURN_xTaskCheckForTimeOut( xReturn );

    return xReturn;
}
5630 /*-----------------------------------------------------------*/
5631
5632 void vTaskMissedYield( void )
5633 {
5634     traceENTER_vTaskMissedYield();
5635
5636     /* Must be called from within a critical section. */
5637     xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
5638
5639     traceRETURN_vTaskMissedYield();
5640 }
5641 /*-----------------------------------------------------------*/
5642
#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
    {
        /* Return the trace-facility number stored in the task's TCB, or 0
         * when no task handle is supplied. */
        UBaseType_t uxNumber = 0U;

        traceENTER_uxTaskGetTaskNumber( xTask );

        if( xTask != NULL )
        {
            const TCB_t * pxTCB = xTask;

            uxNumber = pxTCB->uxTaskNumber;
        }

        traceRETURN_uxTaskGetTaskNumber( uxNumber );

        return uxNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
5668 /*-----------------------------------------------------------*/
5669
#if ( configUSE_TRACE_FACILITY == 1 )

    void vTaskSetTaskNumber( TaskHandle_t xTask,
                             const UBaseType_t uxHandle )
    {
        traceENTER_vTaskSetTaskNumber( xTask, uxHandle );

        /* Store the trace-facility number in the task's TCB.  A NULL
         * handle is silently ignored. */
        if( xTask != NULL )
        {
            TCB_t * const pxTCB = xTask;

            pxTCB->uxTaskNumber = uxHandle;
        }

        traceRETURN_vTaskSetTaskNumber();
    }

#endif /* configUSE_TRACE_FACILITY */
5689 /*-----------------------------------------------------------*/
5690
5691 /*
5692  * -----------------------------------------------------------
5693  * The passive idle task.
5694  * ----------------------------------------------------------
5695  *
5696  * The passive idle task is used for all the additional cores in a SMP
5697  * system. There must be only 1 active idle task and the rest are passive
5698  * idle tasks.
5699  *
5700  * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5701  * language extensions.  The equivalent prototype for this function is:
5702  *
5703  * void prvPassiveIdleTask( void *pvParameters );
5704  */
5705
#if ( configNUMBER_OF_CORES > 1 )
    static portTASK_FUNCTION( prvPassiveIdleTask, pvParameters )
    {
        /* Prevent compiler warnings about the unused parameter. */
        ( void ) pvParameters;

        /* All cores start up in an idle task; this initial yield gets the
         * application tasks started. */
        taskYIELD();

        for( ; configCONTROL_INFINITE_LOOP(); )
        {
            #if ( configUSE_PREEMPTION == 0 )
            {
                /* If we are not using preemption we keep forcing a task switch to
                 * see if any other task has become available.  If we are using
                 * preemption we don't need to do this as any task becoming available
                 * will automatically get the processor anyway. */
                taskYIELD();
            }
            #endif /* configUSE_PREEMPTION */

            #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
            {
                /* When using preemption tasks of equal priority will be
                 * timesliced.  If a task that is sharing the idle priority is ready
                 * to run then the idle task should yield before the end of the
                 * timeslice.
                 *
                 * A critical region is not required here as we are just reading from
                 * the list, and an occasional incorrect value will not matter.  If
                 * the ready list at the idle priority contains one more task than the
                 * number of idle tasks, which is equal to the configured numbers of cores
                 * then a task other than the idle task is ready to execute. */
                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
                {
                    taskYIELD();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

            #if ( configUSE_PASSIVE_IDLE_HOOK == 1 )
            {
                /* Call the user defined function from within the idle task.  This
                 * allows the application designer to add background functionality
                 * without the overhead of a separate task.
                 *
                 * This hook is intended to manage core activity such as disabling cores that go idle.
                 *
                 * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
                 * CALL A FUNCTION THAT MIGHT BLOCK. */
                vApplicationPassiveIdleHook();
            }
            #endif /* configUSE_PASSIVE_IDLE_HOOK */
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
5764
5765 /*
5766  * -----------------------------------------------------------
5767  * The idle task.
5768  * ----------------------------------------------------------
5769  *
5770  * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5771  * language extensions.  The equivalent prototype for this function is:
5772  *
5773  * void prvIdleTask( void *pvParameters );
5774  *
5775  */
5776
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    #if ( configNUMBER_OF_CORES > 1 )
    {
        /* SMP all cores start up in the idle task. This initial yield gets the application
         * tasks started. */
        taskYIELD();
    }
    #endif /* #if ( configNUMBER_OF_CORES > 1 ) */

    for( ; configCONTROL_INFINITE_LOOP(); )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available.  If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced.  If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter.  If
             * the ready list at the idle priority contains one more task than the
             * number of idle tasks, which is equal to the configured numbers of cores
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            /* Call the user defined function from within the idle task.
             * Like the passive idle hook above, this runs in the idle task's
             * context and so must not call anything that might block. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */

        /* This conditional compilation should use inequality to 0, not equality
         * to 1.  This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode  implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task.  Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended.  The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                vTaskSuspendAll();
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    if( xExpectedIdleTime >= ( TickType_t ) configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        /* Enter the port-defined low power state for (up to)
                         * xExpectedIdleTime ticks. */
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */

        #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) )
        {
            /* Call the user defined function from within the idle task.  This
             * allows the application designer to add background functionality
             * without the overhead of a separate task.
             *
             * This hook is intended to manage core activity such as disabling cores that go idle.
             *
             * NOTE: vApplicationPassiveIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
             * CALL A FUNCTION THAT MIGHT BLOCK. */
            vApplicationPassiveIdleHook();
        }
        #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PASSIVE_IDLE_HOOK == 1 ) ) */
    }
}
5909 /*-----------------------------------------------------------*/
5910
#if ( configUSE_TICKLESS_IDLE != 0 )

/* Returns eAbortSleep when work became pending while the scheduler was
 * suspended, eNoTasksWaitingTimeout when every application task is in the
 * suspended list (so only external interrupts can create work), and
 * eStandardSleep otherwise. */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        #if ( INCLUDE_vTaskSuspend == 1 )
            /* The idle task exists in addition to the application tasks. */
            const UBaseType_t uxNonApplicationTasks = configNUMBER_OF_CORES;
        #endif /* INCLUDE_vTaskSuspend */

        eSleepModeStatus eReturn = eStandardSleep;

        traceENTER_eTaskConfirmSleepModeStatus();

        /* This function must be called from a critical section. */

        if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0U )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xPendedTicks != 0U )
        {
            /* A tick interrupt has already occurred but was held pending
             * because the scheduler is suspended. */
            eReturn = eAbortSleep;
        }

        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                /* If all the tasks are in the suspended list (which might mean they
                 * have an infinite block time rather than actually being suspended)
                 * then it is safe to turn all clocks off and just wait for external
                 * interrupts. */
                eReturn = eNoTasksWaitingTimeout;
            }
        #endif /* INCLUDE_vTaskSuspend */
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_eTaskConfirmSleepModeStatus( eReturn );

        return eReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
5964 /*-----------------------------------------------------------*/
5965
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

    void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
                                            BaseType_t xIndex,
                                            void * pvValue )
    {
        traceENTER_vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );

        /* Only store the pointer when the index falls within the configured
         * thread local storage array; out of range indexes are ignored. */
        if( ( xIndex >= 0 ) &&
            ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
        {
            TCB_t * const pxTCB = prvGetTCBFromHandle( xTaskToSet );

            configASSERT( pxTCB != NULL );
            pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
        }

        traceRETURN_vTaskSetThreadLocalStoragePointer();
    }

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
5988 /*-----------------------------------------------------------*/
5989
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

    void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
                                               BaseType_t xIndex )
    {
        /* Return the stored thread local storage pointer, or NULL when the
         * index is outside the configured array. */
        void * pvStoredValue = NULL;

        traceENTER_pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );

        if( ( xIndex >= 0 ) &&
            ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
        {
            TCB_t * const pxTCB = prvGetTCBFromHandle( xTaskToQuery );

            pvStoredValue = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
        }

        traceRETURN_pvTaskGetThreadLocalStoragePointer( pvStoredValue );

        return pvStoredValue;
    }

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
6017 /*-----------------------------------------------------------*/
6018
#if ( portUSING_MPU_WRAPPERS == 1 )

    void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
                                  const MemoryRegion_t * const pxRegions )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskAllocateMPURegions( xTaskToModify, pxRegions );

        /* A NULL handle selects the calling task, so this can be used to
         * change the MPU settings of the currently running task. */
        pxTCB = prvGetTCBFromHandle( xTaskToModify );
        vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 );

        traceRETURN_vTaskAllocateMPURegions();
    }

#endif /* portUSING_MPU_WRAPPERS */
6038 /*-----------------------------------------------------------*/
6039
6040 static void prvInitialiseTaskLists( void )
6041 {
6042     UBaseType_t uxPriority;
6043
6044     for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
6045     {
6046         vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
6047     }
6048
6049     vListInitialise( &xDelayedTaskList1 );
6050     vListInitialise( &xDelayedTaskList2 );
6051     vListInitialise( &xPendingReadyList );
6052
6053     #if ( INCLUDE_vTaskDelete == 1 )
6054     {
6055         vListInitialise( &xTasksWaitingTermination );
6056     }
6057     #endif /* INCLUDE_vTaskDelete */
6058
6059     #if ( INCLUDE_vTaskSuspend == 1 )
6060     {
6061         vListInitialise( &xSuspendedTaskList );
6062     }
6063     #endif /* INCLUDE_vTaskSuspend */
6064
6065     /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
6066      * using list2. */
6067     pxDelayedTaskList = &xDelayedTaskList1;
6068     pxOverflowDelayedTaskList = &xDelayedTaskList2;
6069 }
6070 /*-----------------------------------------------------------*/
6071
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            #if ( configNUMBER_OF_CORES == 1 )
            {
                /* The critical section protects the termination list and the
                 * task counters; the TCB itself is freed outside it. */
                taskENTER_CRITICAL();
                {
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
                        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                        --uxCurrentNumberOfTasks;
                        --uxDeletedTasksWaitingCleanUp;
                    }
                }
                taskEXIT_CRITICAL();

                prvDeleteTCB( pxTCB );
            }
            #else /* #if( configNUMBER_OF_CORES == 1 ) */
            {
                pxTCB = NULL;

                taskENTER_CRITICAL();
                {
                    /* For SMP, multiple idles can be running simultaneously
                     * and we need to check that other idles did not cleanup while we were
                     * waiting to enter the critical section. */
                    if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
                    {
                        /* MISRA Ref 11.5.3 [Void pointer assignment] */
                        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                        /* coverity[misra_c_2012_rule_11_5_violation] */
                        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );

                        if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                        {
                            ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                            --uxCurrentNumberOfTasks;
                            --uxDeletedTasksWaitingCleanUp;
                        }
                        else
                        {
                            /* The TCB to be deleted still has not yet been switched out
                             * by the scheduler, so we will just exit this loop early and
                             * try again next time. */
                            taskEXIT_CRITICAL();
                            break;
                        }
                    }
                }
                taskEXIT_CRITICAL();

                /* pxTCB is only non-NULL if this idle instance removed it
                 * from the termination list above, so it is safe to free. */
                if( pxTCB != NULL )
                {
                    prvDeleteTCB( pxTCB );
                }
            }
            #endif /* #if( configNUMBER_OF_CORES == 1 ) */
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
6146 /*-----------------------------------------------------------*/
6147
#if ( configUSE_TRACE_FACILITY == 1 )

    void vTaskGetInfo( TaskHandle_t xTask,
                       TaskStatus_t * pxTaskStatus,
                       BaseType_t xGetFreeStackSpace,
                       eTaskState eState )
    {
        TCB_t * pxTCB;

        traceENTER_vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );

        /* xTask is NULL then get the state of the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Copy the fixed details straight out of the TCB. */
        pxTaskStatus->xHandle = pxTCB;
        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
            pxTaskStatus->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
            pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
        #endif
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
        {
            pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
        }
        #endif

        #if ( configUSE_MUTEXES == 1 )
        {
            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
        }
        #else
        {
            /* Without mutexes priority inheritance cannot occur, so there is
             * no separate base priority to report. */
            pxTaskStatus->uxBasePriority = 0;
        }
        #endif

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
        }
        #else
        {
            pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
        }
        #endif

        /* Obtaining the task state is a little fiddly, so is only done if the
         * value of eState passed into this function is eInvalid - otherwise the
         * state is just set to whatever is passed in. */
        if( eState != eInvalid )
        {
            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
            {
                pxTaskStatus->eCurrentState = eRunning;
            }
            else
            {
                pxTaskStatus->eCurrentState = eState;

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* If the task is in the suspended list then there is a
                     *  chance it is actually just blocked indefinitely - so really
                     *  it should be reported as being in the Blocked state. */
                    if( eState == eSuspended )
                    {
                        vTaskSuspendAll();
                        {
                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                            {
                                /* The task's event list item is in a list, so
                                 * it is waiting on an RTOS object - blocked. */
                                pxTaskStatus->eCurrentState = eBlocked;
                            }
                            else
                            {
                                BaseType_t x;

                                /* The task does not appear on the event list item of
                                 * and of the RTOS objects, but could still be in the
                                 * blocked state if it is waiting on its notification
                                 * rather than waiting on an object.  If not, is
                                 * suspended. */
                                for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                                {
                                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                    {
                                        pxTaskStatus->eCurrentState = eBlocked;
                                        break;
                                    }
                                }
                            }
                        }
                        ( void ) xTaskResumeAll();
                    }
                }
                #endif /* INCLUDE_vTaskSuspend */

                /* Tasks can be in pending ready list and other state list at the
                 * same time. These tasks are in ready state no matter what state
                 * list the task is in. */
                taskENTER_CRITICAL();
                {
                    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
                    {
                        pxTaskStatus->eCurrentState = eReady;
                    }
                }
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
        }

        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
         * parameter is provided to allow it to be skipped. */
        if( xGetFreeStackSpace != pdFALSE )
        {
            #if ( portSTACK_GROWTH > 0 )
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
            }
            #else
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
            }
            #endif
        }
        else
        {
            pxTaskStatus->usStackHighWaterMark = 0;
        }

        traceRETURN_vTaskGetInfo();
    }

#endif /* configUSE_TRACE_FACILITY */
6289 /*-----------------------------------------------------------*/
6290
#if ( configUSE_TRACE_FACILITY == 1 )

/* Populate pxTaskStatusArray with a TaskStatus_t entry for every task in
 * pxList, marking each entry with eState, and return the number of entries
 * written.  NOTE(review): assumes pxList is not modified while iterating -
 * confirm callers suspend the scheduler or hold a critical section. */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState )
    {
        configLIST_VOLATILE TCB_t * pxNextTCB;
        configLIST_VOLATILE TCB_t * pxFirstTCB;
        UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first task so the circular walk below knows when
             * it has been all the way around the list. */
            /* MISRA Ref 11.5.3 [Void pointer assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );

            /* Populate an TaskStatus_t structure within the
             * pxTaskStatusArray array for each task that is referenced from
             * pxList.  See the definition of TaskStatus_t in task.h for the
             * meaning of each TaskStatus_t structure member. */
            do
            {
                /* MISRA Ref 11.5.3 [Void pointer assignment] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
                /* coverity[misra_c_2012_rule_11_5_violation] */
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
6331 /*-----------------------------------------------------------*/
6332
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )

    static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
    {
        /* Count how many bytes at the unused end of the stack still hold the
         * fill value, then convert the byte count into a count of
         * StackType_t sized words. */
        uint32_t ulByteCount;

        for( ulByteCount = 0U; *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE; pucStackByte -= portSTACK_GROWTH )
        {
            ulByteCount++;
        }

        return ( configSTACK_DEPTH_TYPE ) ( ulByteCount / ( uint32_t ) sizeof( StackType_t ) );
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
6351 /*-----------------------------------------------------------*/
6352
#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )

/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
 * same except for their return type.  Using configSTACK_DEPTH_TYPE allows the
 * user to determine the return type.  It gets around the problem of the value
 * overflowing on 8-bit types without breaking backward compatibility for
 * applications that expect an 8-bit return type. */
    configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        uint8_t * pucStackLimit;
        configSTACK_DEPTH_TYPE uxHighWaterMark;

        traceENTER_uxTaskGetStackHighWaterMark2( xTask );

        /* A NULL handle queries the calling task's stack. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Measure from the end of the stack furthest from current use: the
         * lowest address when the stack grows down, pxEndOfStack otherwise. */
        #if portSTACK_GROWTH < 0
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxHighWaterMark = prvTaskCheckFreeStackSpace( pucStackLimit );

        traceRETURN_uxTaskGetStackHighWaterMark2( uxHighWaterMark );

        return uxHighWaterMark;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
6395 /*-----------------------------------------------------------*/
6396
#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )

/* Report the smallest amount of free stack space there has been for the task
 * referenced by xTask since the task started executing.  See
 * uxTaskGetStackHighWaterMark2() for a variant with a user-configurable
 * return type. */
    UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        uint8_t * pucStackLimit;
        UBaseType_t uxHighWaterMark;

        traceENTER_uxTaskGetStackHighWaterMark( xTask );

        pxTCB = prvGetTCBFromHandle( xTask );

        /* Which end of the stack to measure from depends on the direction in
         * which the port's stack grows. */
        #if portSTACK_GROWTH < 0
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxStack;
        }
        #else
        {
            pucStackLimit = ( uint8_t * ) pxTCB->pxEndOfStack;
        }
        #endif

        uxHighWaterMark = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucStackLimit );

        traceRETURN_uxTaskGetStackHighWaterMark( uxHighWaterMark );

        return uxHighWaterMark;
    }

#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
6427 /*-----------------------------------------------------------*/
6428
#if ( INCLUDE_vTaskDelete == 1 )

/* Free any dynamically allocated memory (stack, TCB, TLS block) that was
 * allocated for the task referenced by pxTCB.  Statically allocated memory
 * is left untouched. */
    static void prvDeleteTCB( TCB_t * pxTCB )
    {
        /* This call is required specifically for the TriCore port.  It must be
         * above the vPortFree() calls.  The call is also used by ports/demos that
         * want to allocate and clean RAM statically. */
        portCLEAN_UP_TCB( pxTCB );

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Free up the memory allocated for the task's TLS Block. */
            configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock );
        }
        #endif

        #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
        {
            /* The task can only have been allocated dynamically - free both
             * the stack and TCB. */
            vPortFreeStack( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
        {
            /* The task could have been allocated statically or dynamically, so
             * check what was statically allocated before trying to free the
             * memory. */
            if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
            {
                /* Both the stack and TCB were allocated dynamically, so both
                 * must be freed. */
                vPortFreeStack( pxTCB->pxStack );
                vPortFree( pxTCB );
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                /* Only the stack was statically allocated, so the TCB is the
                 * only memory that must be freed. */
                vPortFree( pxTCB );
            }
            else
            {
                /* Neither the stack nor the TCB were allocated dynamically, so
                 * nothing needs to be freed. */
                configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
    }

#endif /* INCLUDE_vTaskDelete */
6482 /*-----------------------------------------------------------*/
6483
6484 static void prvResetNextTaskUnblockTime( void )
6485 {
6486     if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
6487     {
6488         /* The new current delayed list is empty.  Set xNextTaskUnblockTime to
6489          * the maximum possible value so it is  extremely unlikely that the
6490          * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
6491          * there is an item in the delayed list. */
6492         xNextTaskUnblockTime = portMAX_DELAY;
6493     }
6494     else
6495     {
6496         /* The new current delayed list is not empty, get the value of
6497          * the item at the head of the delayed list.  This is the time at
6498          * which the task at the head of the delayed list should be removed
6499          * from the Blocked state. */
6500         xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
6501     }
6502 }
6503 /*-----------------------------------------------------------*/
6504
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 )

    #if ( configNUMBER_OF_CORES == 1 )
        /* Return the handle of the currently running task. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;

            traceENTER_xTaskGetCurrentTaskHandle();

            /* A critical section is not required as this is not called from
             * an interrupt and the current TCB will always be the same for any
             * individual execution thread. */
            xReturn = pxCurrentTCB;

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        /* Return the handle of the task currently running on the core on
         * which this function executes.  Interrupts are masked while
         * pxCurrentTCBs[] is read for the current core. */
        TaskHandle_t xTaskGetCurrentTaskHandle( void )
        {
            TaskHandle_t xReturn;
            UBaseType_t uxSavedInterruptStatus;

            traceENTER_xTaskGetCurrentTaskHandle();

            uxSavedInterruptStatus = portSET_INTERRUPT_MASK();
            {
                xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
            }
            portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus );

            traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

            return xReturn;
        }

        /* Return the handle of the task currently running on the core with
         * ID xCoreID, or NULL if xCoreID is not a valid core ID. */
        TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID )
        {
            TaskHandle_t xReturn = NULL;

            traceENTER_xTaskGetCurrentTaskHandleForCore( xCoreID );

            if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
            {
                xReturn = pxCurrentTCBs[ xCoreID ];
            }

            traceRETURN_xTaskGetCurrentTaskHandleForCore( xReturn );

            return xReturn;
        }
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) */
6561
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )

/* Return taskSCHEDULER_NOT_STARTED, taskSCHEDULER_RUNNING or
 * taskSCHEDULER_SUSPENDED depending on the state of the scheduler.  In SMP
 * builds uxSchedulerSuspended is read inside a critical section. */
    BaseType_t xTaskGetSchedulerState( void )
    {
        BaseType_t xReturn;

        traceENTER_xTaskGetSchedulerState();

        if( xSchedulerRunning == pdFALSE )
        {
            xReturn = taskSCHEDULER_NOT_STARTED;
        }
        else
        {
            #if ( configNUMBER_OF_CORES > 1 )
                taskENTER_CRITICAL();
            #endif
            {
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    xReturn = taskSCHEDULER_RUNNING;
                }
                else
                {
                    xReturn = taskSCHEDULER_SUSPENDED;
                }
            }
            #if ( configNUMBER_OF_CORES > 1 )
                taskEXIT_CRITICAL();
            #endif
        }

        traceRETURN_xTaskGetSchedulerState( xReturn );

        return xReturn;
    }

#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
6600 /*-----------------------------------------------------------*/
6601
#if ( configUSE_MUTEXES == 1 )

/* Called when the running task is attempting to take a mutex that is already
 * held by pxMutexHolder.  If the holder's priority is below that of the
 * running task then the holder temporarily inherits the running task's
 * priority.  Returns pdTRUE if priority inheritance has taken place (either
 * now, or previously via another mutex), otherwise pdFALSE. */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxMutexHolderTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityInherit( pxMutexHolder );

        /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority
         * inheritance is not applied in this scenario. */
        if( pxMutexHolder != NULL )
        {
            /* If the holder of the mutex has a priority below the priority of
             * the task attempting to obtain the mutex then it will temporarily
             * inherit the priority of the task attempting to obtain the mutex. */
            if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
            {
                /* Adjust the mutex holder state to account for its new
                 * priority.  Only reset the event list item value if the value is
                 * not being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                {
                    listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task being modified is in the ready state it will need
                 * to be moved into a new list. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Inherit the priority before being moved into the new list. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                    prvAddTaskToReadyList( pxMutexHolderTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is raised. Yield for this task
                         * if it is not running. */
                        if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE )
                        {
                            prvYieldForTask( pxMutexHolderTCB );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                }
                else
                {
                    /* Just inherit the priority. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                }

                traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

                /* Inheritance occurred. */
                xReturn = pdTRUE;
            }
            else
            {
                if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
                {
                    /* The base priority of the mutex holder is lower than the
                     * priority of the task attempting to take the mutex, but the
                     * current priority of the mutex holder is not lower than the
                     * priority of the task attempting to take the mutex.
                     * Therefore the mutex holder must have already inherited a
                     * priority, but inheritance would have occurred if that had
                     * not been the case. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityInherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
6703 /*-----------------------------------------------------------*/
6704
#if ( configUSE_MUTEXES == 1 )

/* Called when the holder of a mutex gives the mutex back.  Decrements the
 * holder's count of held mutexes and, if the holder had inherited a priority
 * and no longer holds any mutexes, restores the holder's base priority.
 * Returns pdTRUE if a context switch is required, otherwise pdFALSE. */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        traceENTER_xTaskPriorityDisinherit( pxMutexHolder );

        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
             * If the mutex is held by a task then it cannot be given from an
             * interrupt, and if a mutex is given by the holding task then it must
             * be the running state task. */
            configASSERT( pxTCB == pxCurrentTCB );
            configASSERT( pxTCB->uxMutexesHeld );
            ( pxTCB->uxMutexesHeld )--;

            /* Has the holder of the mutex inherited the priority of another
             * task? */
            if( pxTCB->uxPriority != pxTCB->uxBasePriority )
            {
                /* Only disinherit if no other mutexes are held. */
                if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                {
                    /* A task can only have an inherited priority if it holds
                     * the mutex.  If the mutex is held by a task then it cannot be
                     * given from an interrupt, and if a mutex is given by the
                     * holding task then it must be the running state task.  Remove
                     * the holding task from the ready list. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Disinherit the priority before adding the task into the
                     * new  ready list. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    pxTCB->uxPriority = pxTCB->uxBasePriority;

                    /* Reset the event list item value.  It cannot be in use for
                     * any other purpose if this task is running, and it must be
                     * running to give back the mutex. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority );
                    prvAddTaskToReadyList( pxTCB );
                    #if ( configNUMBER_OF_CORES > 1 )
                    {
                        /* The priority of the task is dropped. Yield the core on
                         * which the task is running. */
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                        {
                            prvYieldCore( pxTCB->xTaskRunState );
                        }
                    }
                    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

                    /* Return true to indicate that a context switch is required.
                     * This is only actually required in the corner case whereby
                     * multiple mutexes were held and the mutexes were given back
                     * in an order different to that in which they were taken.
                     * If a context switch did not occur when the first mutex was
                     * returned, even if a task was waiting on it, then a context
                     * switch should occur when the last mutex is returned whether
                     * a task is waiting on it or not. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xTaskPriorityDisinherit( xReturn );

        return xReturn;
    }

#endif /* configUSE_MUTEXES */
6797 /*-----------------------------------------------------------*/
6798
#if ( configUSE_MUTEXES == 1 )

/* Called when a task waiting for a mutex times out.  Recalculates the
 * priority of the mutex holder as the greater of the holder's base priority
 * and uxHighestPriorityWaitingTask, and applies the new priority - but only
 * if the holder holds exactly one mutex (a simplification of the priority
 * inheritance implementation, see the comment inside). */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                              UBaseType_t uxHighestPriorityWaitingTask )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
        const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );

        if( pxMutexHolder != NULL )
        {
            /* If pxMutexHolder is not NULL then the holder must hold at least
             * one mutex. */
            configASSERT( pxTCB->uxMutexesHeld );

            /* Determine the priority to which the priority of the task that
             * holds the mutex should be set.  This will be the greater of the
             * holding task's base priority and the priority of the highest
             * priority task that is waiting to obtain the mutex. */
            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
            {
                uxPriorityToUse = uxHighestPriorityWaitingTask;
            }
            else
            {
                uxPriorityToUse = pxTCB->uxBasePriority;
            }

            /* Does the priority need to change? */
            if( pxTCB->uxPriority != uxPriorityToUse )
            {
                /* Only disinherit if no other mutexes are held.  This is a
                 * simplification in the priority inheritance implementation.  If
                 * the task that holds the mutex is also holding other mutexes then
                 * the other mutexes may have caused the priority inheritance. */
                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                {
                    /* If a task has timed out because it already holds the
                     * mutex it was trying to obtain then it cannot of inherited
                     * its own priority. */
                    configASSERT( pxTCB != pxCurrentTCB );

                    /* Disinherit the priority, remembering the previous
                     * priority to facilitate determining the subject task's
                     * state. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
                    pxTCB->uxPriority = uxPriorityToUse;

                    /* Only reset the event list item value if the value is not
                     * being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == ( ( TickType_t ) 0UL ) )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the running task is not the task that holds the mutex
                     * then the task that holds the mutex could be in either the
                     * Ready, Blocked or Suspended states.  Only remove the task
                     * from its current state list if it is in the Ready state as
                     * the task's priority is going to change and there is one
                     * Ready list per priority. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                             * there is no need to check again and the port level
                             * reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        prvAddTaskToReadyList( pxTCB );
                        #if ( configNUMBER_OF_CORES > 1 )
                        {
                            /* The priority of the task is dropped. Yield the core on
                             * which the task is running. */
                            if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
                            {
                                prvYieldCore( pxTCB->xTaskRunState );
                            }
                        }
                        #endif /* if ( configNUMBER_OF_CORES > 1 ) */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskPriorityDisinheritAfterTimeout();
    }

#endif /* configUSE_MUTEXES */
6917 /*-----------------------------------------------------------*/
6918
#if ( configNUMBER_OF_CORES > 1 )

/* Yield the calling core immediately unless it is inside a critical section,
 * in which case record a pending yield that takes effect when the critical
 * section is exited. */
    void vTaskYieldWithinAPI( void )
    {
        traceENTER_vTaskYieldWithinAPI();

        if( portGET_CRITICAL_NESTING_COUNT() != 0U )
        {
            /* Inside a critical section - defer the yield until the critical
             * section is exited. */
            xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
        }
        else
        {
            portYIELD();
        }

        traceRETURN_vTaskYieldWithinAPI();
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6941
6942 /*-----------------------------------------------------------*/
6943
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

/* Single-core version of vTaskEnterCritical() for ports that keep the
 * critical nesting count in the TCB.  Disables interrupts and increments
 * the nesting count of the current task. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            ( pxCurrentTCB->uxCriticalNesting )++;

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( pxCurrentTCB->uxCriticalNesting == 1U )
            {
                portASSERT_IF_IN_ISR();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
6976 /*-----------------------------------------------------------*/
6977
#if ( configNUMBER_OF_CORES > 1 )

/* SMP version of vTaskEnterCritical().  Disables interrupts and, on the
 * first (outermost) entry, takes both the task and ISR spinlocks before
 * incrementing the critical nesting count. */
    void vTaskEnterCritical( void )
    {
        traceENTER_vTaskEnterCritical();

        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Outermost entry - acquire both locks. */
            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_TASK_LOCK();
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();

            /* This is not the interrupt safe version of the enter critical
             * function so  assert() if it is being called from an interrupt
             * context.  Only API functions that end in "FromISR" can be used in an
             * interrupt.  Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( portGET_CRITICAL_NESTING_COUNT() == 1U )
            {
                portASSERT_IF_IN_ISR();

                if( uxSchedulerSuspended == 0U )
                {
                    /* The only time there would be a problem is if this is called
                     * before a context switch and vTaskExitCritical() is called
                     * after pxCurrentTCB changes. Therefore this should not be
                     * used within vTaskSwitchContext(). */
                    prvCheckForRunStateChange();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7025
7026 /*-----------------------------------------------------------*/
7027
#if ( configNUMBER_OF_CORES > 1 )

/* Interrupt-safe SMP critical section entry.  Masks interrupts and, on the
 * outermost entry, takes the ISR lock (only - the task lock is not taken
 * from an ISR).  Returns the previous interrupt mask, which must be passed
 * to the matching vTaskExitCriticalFromISR() call. */
    UBaseType_t vTaskEnterCriticalFromISR( void )
    {
        UBaseType_t uxSavedInterruptStatus = 0;

        traceENTER_vTaskEnterCriticalFromISR();

        if( xSchedulerRunning != pdFALSE )
        {
            uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();

            if( portGET_CRITICAL_NESTING_COUNT() == 0U )
            {
                portGET_ISR_LOCK();
            }

            portINCREMENT_CRITICAL_NESTING_COUNT();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskEnterCriticalFromISR( uxSavedInterruptStatus );

        return uxSavedInterruptStatus;
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7058 /*-----------------------------------------------------------*/
7059
#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )

/* Single-core version of vTaskExitCritical() for ports that keep the
 * critical nesting count in the TCB.  Decrements the nesting count and
 * re-enables interrupts when the count reaches zero. */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If pxCurrentTCB->uxCriticalNesting is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( pxCurrentTCB->uxCriticalNesting > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( pxCurrentTCB->uxCriticalNesting > 0U )
            {
                ( pxCurrentTCB->uxCriticalNesting )--;

                if( pxCurrentTCB->uxCriticalNesting == 0U )
                {
                    portENABLE_INTERRUPTS();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
7103 /*-----------------------------------------------------------*/
7104
#if ( configNUMBER_OF_CORES > 1 )

/* SMP version of vTaskExitCritical().  Decrements the critical nesting
 * count and, when the count reaches zero, releases both locks, re-enables
 * interrupts, and performs any yield that was deferred while inside the
 * critical section. */
    void vTaskExitCritical( void )
    {
        traceENTER_vTaskExitCritical();

        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            /* This function should not be called in ISR. Use vTaskExitCriticalFromISR
             * to exit critical section from ISR. */
            portASSERT_IF_IN_ISR();

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    BaseType_t xYieldCurrentTask;

                    /* Get the xYieldPending stats inside the critical section. */
                    xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ];

                    portRELEASE_ISR_LOCK();
                    portRELEASE_TASK_LOCK();
                    portENABLE_INTERRUPTS();

                    /* When a task yields in a critical section it just sets
                     * xYieldPending to true. So now that we have exited the
                     * critical section check if xYieldPending is true, and
                     * if so yield. */
                    if( xYieldCurrentTask != pdFALSE )
                    {
                        portYIELD();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCritical();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7164 /*-----------------------------------------------------------*/
7165
#if ( configNUMBER_OF_CORES > 1 )

    /* SMP (multi-core) implementation of the ISR-safe critical section exit.
     *
     * Decrements the calling core's critical nesting count and, when it
     * reaches zero, releases the ISR lock and restores the interrupt mask
     * that was returned by the matching enter call.
     *
     * uxSavedInterruptStatus - the value previously returned when the ISR
     * critical section was entered; passed to
     * portCLEAR_INTERRUPT_MASK_FROM_ISR() to restore the interrupt state. */
    void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
    {
        traceENTER_vTaskExitCriticalFromISR( uxSavedInterruptStatus );

        if( xSchedulerRunning != pdFALSE )
        {
            /* If critical nesting count is zero then this function
             * does not match a previous call to vTaskEnterCritical(). */
            configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

            if( portGET_CRITICAL_NESTING_COUNT() > 0U )
            {
                portDECREMENT_CRITICAL_NESTING_COUNT();

                if( portGET_CRITICAL_NESTING_COUNT() == 0U )
                {
                    /* Outermost exit - release the lock before restoring the
                     * interrupt mask. */
                    portRELEASE_ISR_LOCK();
                    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Scheduler not yet running - nothing to release. */
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskExitCriticalFromISR();
    }

#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
7206 /*-----------------------------------------------------------*/
7207
#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )

    /* Copy pcTaskName into pcBuffer, then space-pad the result out to a
     * fixed column width of ( configMAX_TASK_NAME_LEN - 1 ) characters so
     * stats tables line up, and NUL-terminate.  Returns a pointer to the
     * terminating NUL so the caller can continue writing from there.
     * pcBuffer must be at least configMAX_TASK_NAME_LEN bytes. */
    static char * prvWriteNameToBuffer( char * pcBuffer,
                                        const char * pcTaskName )
    {
        size_t uxIndex;

        /* Copy the whole name first. */
        ( void ) strcpy( pcBuffer, pcTaskName );

        /* Pad with spaces up to the fixed column width. */
        uxIndex = strlen( pcBuffer );

        while( uxIndex < ( size_t ) ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ) )
        {
            pcBuffer[ uxIndex ] = ' ';
            uxIndex++;
        }

        /* Terminate and hand back the new end of string. */
        pcBuffer[ uxIndex ] = ( char ) 0x00;

        return &( pcBuffer[ uxIndex ] );
    }

#endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
7233 /*-----------------------------------------------------------*/
7234
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

    /* Write a human readable table of task name, state, priority, stack
     * high water mark and task number into pcWriteBuffer (uxBufferLength
     * bytes).  Output stops cleanly when the next entry would not fit.
     * NOTE(review): pcWriteBuffer is dereferenced without a NULL check and
     * is written even when uxBufferLength is 0 - callers must pass a valid,
     * non-empty buffer. */
    void vTaskListTasks( char * pcWriteBuffer,
                         size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        char cStatus;

        traceENTER_vTaskListTasks( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskListTasks() calls uxTaskGetSystemState(), then formats part of the
         * uxTaskGetSystemState() output into a human readable table that
         * displays task: names, states, priority, stack usage and task number.
         * Stack usage specified as the number of unused StackType_t words stack can hold
         * on top of stack - not the number of bytes.
         *
         * vTaskListTasks() has a dependency on the snprintf() C library function that
         * might bloat the code size, use a lot of stack, and provide different
         * results on different platforms.  An alternative, tiny, third party,
         * and limited functionality implementation of snprintf() is provided in
         * many of the FreeRTOS/Demo sub-directories in a file called
         * printf-stdarg.c (note printf-stdarg.c does not provide a full
         * snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskListTasks().
         */


        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  if
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

            /* Create a human readable table from the binary data. */
            for( x = 0; x < uxArraySize; x++ )
            {
                /* Map the task state enum to its single-character code for
                 * the table's state column. */
                switch( pxTaskStatusArray[ x ].eCurrentState )
                {
                    case eRunning:
                        cStatus = tskRUNNING_CHAR;
                        break;

                    case eReady:
                        cStatus = tskREADY_CHAR;
                        break;

                    case eBlocked:
                        cStatus = tskBLOCKED_CHAR;
                        break;

                    case eSuspended:
                        cStatus = tskSUSPENDED_CHAR;
                        break;

                    case eDeleted:
                        cStatus = tskDELETED_CHAR;
                        break;

                    case eInvalid: /* Fall through. */
                    default:       /* Should not get here, but it is included
                                    * to prevent static checking errors. */
                        cStatus = ( char ) 0x00;
                        break;
                }

                /* Is there enough space in the buffer to hold task name? */
                if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                {
                    /* Write the task name to the string, padding with spaces so it
                     * can be printed in tabular form more easily. */
                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
                    /* Do not count the terminating null character. */
                    uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                    /* Is there space left in the buffer? -1 is done because snprintf
                     * writes a terminating null character. So we are essentially
                     * checking if the buffer has space to write at least one non-null
                     * character. */
                    if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                    {
                        /* Write the rest of the string. */
                        #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\t0x%x\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCoreAffinityMask );
                        #else /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
                            /* MISRA Ref 21.6.1 [snprintf for utility] */
                            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                            /* coverity[misra_c_2012_rule_21_6_violation] */
                            iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                             uxBufferLength - uxConsumedBufferLength,
                                                             "\t%c\t%u\t%u\t%u\r\n",
                                                             cStatus,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
                                                             ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber );
                        #endif /* ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

                        /* Normalise snprintf's return value (which may be
                         * negative on error or larger than the space given
                         * on truncation) to the count actually written. */
                        uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );

                        uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                        pcWriteBuffer += uxCharsWrittenBySnprintf;
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }
                }
                else
                {
                    xOutputBufferFull = pdTRUE;
                }

                if( xOutputBufferFull == pdTRUE )
                {
                    break;
                }
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskListTasks();
    }

#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
7403 /*----------------------------------------------------------*/
7404
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )

    /* Write a human readable table of per-task run time (absolute counter
     * value and percentage of total run time, rounded down) into
     * pcWriteBuffer (uxBufferLength bytes).  Output stops cleanly when the
     * next entry would not fit.  If the total run time is zero no table
     * rows are produced.
     * NOTE(review): pcWriteBuffer is dereferenced without a NULL check -
     * callers must pass a valid, non-empty buffer. */
    void vTaskGetRunTimeStatistics( char * pcWriteBuffer,
                                    size_t uxBufferLength )
    {
        TaskStatus_t * pxTaskStatusArray;
        size_t uxConsumedBufferLength = 0;
        size_t uxCharsWrittenBySnprintf;
        int iSnprintfReturnValue;
        BaseType_t xOutputBufferFull = pdFALSE;
        UBaseType_t uxArraySize, x;
        configRUN_TIME_COUNTER_TYPE ulTotalTime = 0;
        configRUN_TIME_COUNTER_TYPE ulStatsAsPercentage;

        traceENTER_vTaskGetRunTimeStatistics( pcWriteBuffer, uxBufferLength );

        /*
         * PLEASE NOTE:
         *
         * This function is provided for convenience only, and is used by many
         * of the demo applications.  Do not consider it to be part of the
         * scheduler.
         *
         * vTaskGetRunTimeStatistics() calls uxTaskGetSystemState(), then formats part
         * of the uxTaskGetSystemState() output into a human readable table that
         * displays the amount of time each task has spent in the Running state
         * in both absolute and percentage terms.
         *
         * vTaskGetRunTimeStatistics() has a dependency on the snprintf() C library
         * function that might bloat the code size, use a lot of stack, and
         * provide different results on different platforms.  An alternative,
         * tiny, third party, and limited functionality implementation of
         * snprintf() is provided in many of the FreeRTOS/Demo sub-directories in
         * a file called printf-stdarg.c (note printf-stdarg.c does not provide
         * a full snprintf() implementation!).
         *
         * It is recommended that production systems call uxTaskGetSystemState()
         * directly to get access to raw stats data, rather than indirectly
         * through a call to vTaskGetRunTimeStatistics().
         */

        /* Make sure the write buffer does not contain a string. */
        *pcWriteBuffer = ( char ) 0x00;

        /* Take a snapshot of the number of tasks in case it changes while this
         * function is executing. */
        uxArraySize = uxCurrentNumberOfTasks;

        /* Allocate an array index for each task.  NOTE!  If
         * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
         * equate to NULL. */
        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) );

        if( pxTaskStatusArray != NULL )
        {
            /* Generate the (binary) data. */
            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

            /* For percentage calculations. */
            ulTotalTime /= ( ( configRUN_TIME_COUNTER_TYPE ) 100UL );

            /* Avoid divide by zero errors. */
            if( ulTotalTime > 0UL )
            {
                /* Create a human readable table from the binary data. */
                for( x = 0; x < uxArraySize; x++ )
                {
                    /* What percentage of the total run time has the task used?
                     * This will always be rounded down to the nearest integer.
                     * ulTotalRunTime has already been divided by 100. */
                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                    /* Is there enough space in the buffer to hold task name? */
                    if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
                    {
                        /* Write the task name to the string, padding with
                         * spaces so it can be printed in tabular form more
                         * easily. */
                        pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
                        /* Do not count the terminating null character. */
                        uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1U );

                        /* Is there space left in the buffer? -1 is done because snprintf
                         * writes a terminating null character. So we are essentially
                         * checking if the buffer has space to write at least one non-null
                         * character. */
                        if( uxConsumedBufferLength < ( uxBufferLength - 1U ) )
                        {
                            if( ulStatsAsPercentage > 0UL )
                            {
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t%lu%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ulStatsAsPercentage );
                                }
                                #else /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t%u%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
                                                                     ( unsigned int ) ulStatsAsPercentage );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }
                            else
                            {
                                /* If the percentage is zero here then the task has
                                 * consumed less than 1% of the total run time. */
                                #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                                {
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%lu\t\t<1%%\r\n",
                                                                     pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #else
                                {
                                    /* sizeof( int ) == sizeof( long ) so a smaller
                                     * printf() library can be used. */
                                    /* MISRA Ref 21.6.1 [snprintf for utility] */
                                    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-216 */
                                    /* coverity[misra_c_2012_rule_21_6_violation] */
                                    iSnprintfReturnValue = snprintf( pcWriteBuffer,
                                                                     uxBufferLength - uxConsumedBufferLength,
                                                                     "\t%u\t\t<1%%\r\n",
                                                                     ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter );
                                }
                                #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
                            }

                            /* Normalise snprintf's return value (negative on
                             * error, or larger than the space given on
                             * truncation) to the count actually written. */
                            uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );
                            uxConsumedBufferLength += uxCharsWrittenBySnprintf;
                            pcWriteBuffer += uxCharsWrittenBySnprintf;
                        }
                        else
                        {
                            xOutputBufferFull = pdTRUE;
                        }
                    }
                    else
                    {
                        xOutputBufferFull = pdTRUE;
                    }

                    if( xOutputBufferFull == pdTRUE )
                    {
                        break;
                    }
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
             * is 0 then vPortFree() will be #defined to nothing. */
            vPortFree( pxTaskStatusArray );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_vTaskGetRunTimeStatistics();
    }

#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */
7591 /*-----------------------------------------------------------*/
7592
7593 TickType_t uxTaskResetEventItemValue( void )
7594 {
7595     TickType_t uxReturn;
7596
7597     traceENTER_uxTaskResetEventItemValue();
7598
7599     uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
7600
7601     /* Reset the event list item to its normal value - so it can be used with
7602      * queues and semaphores. */
7603     listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) );
7604
7605     traceRETURN_uxTaskResetEventItemValue( uxReturn );
7606
7607     return uxReturn;
7608 }
7609 /*-----------------------------------------------------------*/
7610
#if ( configUSE_MUTEXES == 1 )

    /* Record that the calling task has taken one more mutex and return its
     * handle so the mutex can store the holder.  Returns NULL if no task
     * exists yet (e.g. xSemaphoreCreateMutex() called before any task has
     * been created). */
    TaskHandle_t pvTaskIncrementMutexHeldCount( void )
    {
        TCB_t * pxHolderTCB;

        traceENTER_pvTaskIncrementMutexHeldCount();

        pxHolderTCB = pxCurrentTCB;

        /* pxCurrentTCB is NULL until the first task is created, so guard
         * the increment. */
        if( pxHolderTCB != NULL )
        {
            pxHolderTCB->uxMutexesHeld += ( UBaseType_t ) 1;
        }

        traceRETURN_pvTaskIncrementMutexHeldCount( pxHolderTCB );

        return pxHolderTCB;
    }

#endif /* configUSE_MUTEXES */
7634 /*-----------------------------------------------------------*/
7635
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Implementation behind ulTaskNotifyTake()/ulTaskNotifyTakeIndexed().
     *
     * Blocks the calling task for up to xTicksToWait ticks while the
     * notification value at array index uxIndexToWaitOn is zero.  On exit,
     * if the value is non-zero it is either cleared to zero
     * (xClearCountOnExit != pdFALSE) or decremented by one.
     *
     * Returns the notification value as it was before being cleared or
     * decremented; 0 means the wait ended without a notification. */
    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
                                      BaseType_t xClearCountOnExit,
                                      TickType_t xTicksToWait )
    {
        uint32_t ulReturn;
        BaseType_t xAlreadyYielded;

        traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();

        /* Only block if the notification count is not already non-zero. */
        if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
        {
            /* Mark this task as waiting for a notification. */
            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );

                /* We MUST suspend the scheduler before exiting the critical
                 * section (i.e. before enabling interrupts).
                 *
                 * If we do not do so, a notification sent from an ISR, which
                 * happens after exiting the critical section and before
                 * suspending the scheduler, will get lost. The sequence of
                 * events will be:
                 * 1. Exit critical section.
                 * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the Ready list.
                 * 3. Suspend scheduler.
                 * 4. prvAddCurrentTaskToDelayedList moves the task to the
                 *    delayed or suspended list.
                 * 5. Resume scheduler does not touch the task (because it is
                 *    not on the pendingReady list), effectively losing the
                 *    notification from the ISR.
                 *
                 * The same does not happen when we suspend the scheduler before
                 * exiting the critical section. The sequence of events in this
                 * case will be:
                 * 1. Suspend scheduler.
                 * 2. Exit critical section.
                 * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the pendingReady list as the scheduler is
                 *    suspended.
                 * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
                 *    suspended list. Note that this operation does not nullify
                 *    the add to pendingReady list done in the above step because
                 *    a different list item, namely xEventListItem, is used for
                 *    adding the task to the pendingReady list. In other words,
                 *    the task still remains on the pendingReady list.
                 * 5. Resume scheduler moves the task from pendingReady list to
                 *    the Ready list.
                 */
                vTaskSuspendAll();
                {
                    taskEXIT_CRITICAL();

                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                }
                xAlreadyYielded = xTaskResumeAll();

                /* If xTaskResumeAll() did not already cause a context switch
                 * then request one now so this task stops running while it is
                 * in the Blocked state. */
                if( xAlreadyYielded == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Zero block time - return immediately without waiting. */
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            /* A notification is already pending - no need to block. */
            taskEXIT_CRITICAL();
        }

        /* Execution arrives here either immediately (notification already
         * pending or zero block time) or after the task has been unblocked
         * by a notification or a timeout.  Re-enter the critical section to
         * consume the notification value atomically. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );
            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];

            if( ulReturn != 0UL )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    /* Binary-semaphore style: consume the whole count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ( uint32_t ) 0UL;
                }
                else
                {
                    /* Counting-semaphore style: consume a single count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                /* Timed out - the notification value is still zero. */
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyTake( ulReturn );

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
7753 /*-----------------------------------------------------------*/
7754
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Wait for the notification at array index uxIndexToWaitOn to become
     * pending, blocking for at most xTicksToWait ticks if it is not already
     * pending.  ulBitsToClearOnEntry is cleared from the notification value
     * before any block, and ulBitsToClearOnExit is cleared after a successful
     * wait.  If pulNotificationValue is not NULL it receives the notification
     * value as it was before the exit bits were cleared.  Returns pdTRUE if a
     * notification was received (or was already pending), pdFALSE if the call
     * timed out. */
    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
                                       uint32_t ulBitsToClearOnEntry,
                                       uint32_t ulBitsToClearOnExit,
                                       uint32_t * pulNotificationValue,
                                       TickType_t xTicksToWait )
    {
        BaseType_t xReturn, xAlreadyYielded;

        traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );

        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();

        /* Only block if a notification is not already pending. */
        if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
        {
            /* Clear bits in the task's notification value as bits may get
             * set  by the notifying task or interrupt.  This can be used to
             * clear the value to zero. */
            pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;

            /* Mark this task as waiting for a notification. */
            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );

                /* We MUST suspend the scheduler before exiting the critical
                 * section (i.e. before enabling interrupts).
                 *
                 * If we do not do so, a notification sent from an ISR, which
                 * happens after exiting the critical section and before
                 * suspending the scheduler, will get lost. The sequence of
                 * events will be:
                 * 1. Exit critical section.
                 * 2. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the Ready list.
                 * 3. Suspend scheduler.
                 * 4. prvAddCurrentTaskToDelayedList moves the task to the
                 *    delayed or suspended list.
                 * 5. Resume scheduler does not touch the task (because it is
                 *    not on the pendingReady list), effectively losing the
                 *    notification from the ISR.
                 *
                 * The same does not happen when we suspend the scheduler before
                 * exiting the critical section. The sequence of events in this
                 * case will be:
                 * 1. Suspend scheduler.
                 * 2. Exit critical section.
                 * 3. Interrupt - ISR calls xTaskNotifyFromISR which adds the
                 *    task to the pendingReady list as the scheduler is
                 *    suspended.
                 * 4. prvAddCurrentTaskToDelayedList adds the task to delayed or
                 *    suspended list. Note that this operation does not nullify
                 *    the add to pendingReady list done in the above step because
                 *    a different list item, namely xEventListItem, is used for
                 *    adding the task to the pendingReady list. In other words,
                 *    the task still remains on the pendingReady list.
                 * 5. Resume scheduler moves the task from pendingReady list to
                 *    the Ready list.
                 */
                vTaskSuspendAll();
                {
                    taskEXIT_CRITICAL();

                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                }
                xAlreadyYielded = xTaskResumeAll();

                /* If xTaskResumeAll() did not already perform the context
                 * switch then request one now so this task stops running
                 * while it is in the Blocked state. */
                if( xAlreadyYielded == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* A block time of zero was specified - return immediately. */
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            /* A notification is already pending - no need to block. */
            taskEXIT_CRITICAL();
        }

        /* Execution arrives here either because the notification was already
         * pending, the task was notified while blocked, or the block time
         * expired. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );

            if( pulNotificationValue != NULL )
            {
                /* Output the current notification value, which may or may not
                 * have changed. */
                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
            }

            /* If ucNotifyValue is set then either the task never entered the
             * blocked state (because a notification was already pending) or the
             * task unblocked because of a notification.  Otherwise the task
             * unblocked because of a timeout. */
            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
            {
                /* A notification was not received. */
                xReturn = pdFALSE;
            }
            else
            {
                /* A notification was already pending or a notification was
                 * received while the task was waiting. */
                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit;
                xReturn = pdTRUE;
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyWait( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
7885 /*-----------------------------------------------------------*/
7886
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Send a notification to xTaskToNotify at array index uxIndexToNotify,
     * updating the target's notification value according to eAction (set
     * bits, increment, overwrite, write-without-overwrite, or no action).
     * If pulPreviousNotificationValue is not NULL it receives the value as
     * it was before this call modified it.  If the target task was blocked
     * waiting on this notification it is moved to the Ready list and a yield
     * is requested if appropriate.  Returns pdPASS, except pdFAIL when
     * eSetValueWithoutOverwrite could not write because a notification was
     * already pending.  Must not be called from an ISR - use
     * xTaskGenericNotifyFromISR() instead. */
    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdPASS;
        uint8_t ucOriginalNotifyState;

        traceENTER_xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );

        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
        configASSERT( xTaskToNotify );
        pxTCB = xTaskToNotify;

        taskENTER_CRITICAL();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember the prior state - it determines both whether
             * eSetValueWithoutOverwrite succeeds and whether the target task
             * must be unblocked below. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );

                    break;
            }

            traceTASK_NOTIFY( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );

                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                #if ( configUSE_TICKLESS_IDLE != 0 )
                {
                    /* If a task is blocked waiting for a notification then
                     * xNextTaskUnblockTime might be set to the blocked task's time
                     * out time.  If the task is unblocked for a reason other than
                     * a timeout xNextTaskUnblockTime is normally left unchanged,
                     * because it will automatically get reset to a new value when
                     * the tick count equals xNextTaskUnblockTime.  However if
                     * tickless idling is used it might be more important to enter
                     * sleep mode at the earliest possible time - so reset
                     * xNextTaskUnblockTime here to ensure it is updated at the
                     * earliest possible time. */
                    prvResetNextTaskUnblockTime();
                }
                #endif

                /* Check if the notified task has a priority above the currently
                 * executing task. */
                taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotify( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8005 /*-----------------------------------------------------------*/
8006
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Interrupt-safe version of xTaskGenericNotify().  Performs the same
     * notification-value update selected by eAction, but unblocks a waiting
     * task either directly (scheduler running) or by deferring it to the
     * pending-ready list (scheduler suspended).  Instead of yielding itself,
     * it sets *pxHigherPriorityTaskWoken (when non-NULL) so the ISR can
     * request a context switch on exit.  Returns pdPASS, or pdFAIL when
     * eSetValueWithoutOverwrite could not write. */
    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                          UBaseType_t uxIndexToNotify,
                                          uint32_t ulValue,
                                          eNotifyAction eAction,
                                          uint32_t * pulPreviousNotificationValue,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskGenericNotifyFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* Remember the prior state before marking the notification as
             * received - it decides the eSetValueWithoutOverwrite outcome and
             * whether the task must be unblocked. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter to an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskGenericNotifyFromISR( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8168 /*-----------------------------------------------------------*/
8169
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Interrupt-safe 'give': increments the notification value at array
     * index uxIndexToNotify of xTaskToNotify, behaving like giving a
     * counting semaphore.  If the target task was blocked waiting on this
     * notification it is made ready (directly, or via the pending-ready list
     * when the scheduler is suspended), and *pxHigherPriorityTaskWoken is
     * set pdTRUE (when non-NULL) if the woken task should preempt. */
    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                        UBaseType_t uxIndexToNotify,
                                        BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_vTaskGenericNotifyGiveFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            /* Remember the prior state before marking the notification as
             * received - it decides whether the task must be unblocked. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            /* 'Giving' is equivalent to incrementing a count in a counting
             * semaphore. */
            ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

            traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter in an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_vTaskGenericNotifyGiveFromISR();
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8281 /*-----------------------------------------------------------*/
8282
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Clear the pending state of the notification at array index
     * uxIndexToClear of xTask (the calling task when xTask is NULL).
     * Returns pdPASS if a pending notification was cleared, pdFAIL if no
     * notification was pending at that index. */
    BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
                                             UBaseType_t uxIndexToClear )
    {
        BaseType_t xReturn = pdFAIL;
        TCB_t * pxTCB;

        traceENTER_xTaskGenericNotifyStateClear( xTask, uxIndexToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* A NULL handle means the calling task's own notification state is
         * being cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            /* Only a pending notification can be cleared. */
            if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
            {
                pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
                xReturn = pdPASS;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyStateClear( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8319 /*-----------------------------------------------------------*/
8320
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    /* Clear the bits in ulBitsToClear from the notification value at array
     * index uxIndexToClear of xTask (the calling task when xTask is NULL),
     * returning the value as it was before the bits were cleared. */
    uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
                                            UBaseType_t uxIndexToClear,
                                            uint32_t ulBitsToClear )
    {
        uint32_t ulValueOnEntry;
        TCB_t * pxTCB;

        traceENTER_ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* A NULL handle means the calling task's own notification value is
         * being cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            /* Snapshot the value before clearing so the pre-clear value can
             * be returned to the caller. */
            ulValueOnEntry = pxTCB->ulNotifiedValue[ uxIndexToClear ];
            pxTCB->ulNotifiedValue[ uxIndexToClear ] = ulValueOnEntry & ~ulBitsToClear;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyValueClear( ulValueOnEntry );

        return ulValueOnEntry;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
8353 /*-----------------------------------------------------------*/
8354
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    /* Return the raw run-time counter accumulated by xTask (the calling
     * task when xTask is NULL). */
    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
    {
        configRUN_TIME_COUNTER_TYPE ulRunTime;

        traceENTER_ulTaskGetRunTimeCounter( xTask );

        /* A NULL handle resolves to the calling task's TCB. */
        ulRunTime = prvGetTCBFromHandle( xTask )->ulRunTimeCounter;

        traceRETURN_ulTaskGetRunTimeCounter( ulRunTime );

        return ulRunTime;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
8371 /*-----------------------------------------------------------*/
8372
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    /* Return the percentage of the total run time that has been consumed by
     * xTask (the calling task when xTask is NULL).  Returns zero if the
     * scaled total time is zero (too early to compute a percentage). */
    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
    {
        configRUN_TIME_COUNTER_TYPE ulReturn = 0;
        configRUN_TIME_COUNTER_TYPE ulTotalTime;

        traceENTER_ulTaskGetRunTimePercent( xTask );

        /* Divide the total time by 100 so the division below yields a
         * percentage directly. */
        ulTotalTime = ( ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE() ) / ( configRUN_TIME_COUNTER_TYPE ) 100;

        /* A zero total would cause a divide by zero error. */
        if( ulTotalTime != ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            ulReturn = prvGetTCBFromHandle( xTask )->ulRunTimeCounter / ulTotalTime;
        }

        traceRETURN_ulTaskGetRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
8404 /*-----------------------------------------------------------*/
8405
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    /* Return the sum of the raw run-time counters of every core's idle
     * task. */
    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
    {
        configRUN_TIME_COUNTER_TYPE ulIdleTime = 0;
        BaseType_t xCoreID;

        traceENTER_ulTaskGetIdleRunTimeCounter();

        /* Accumulate the counter of each core's idle task. */
        for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
        {
            ulIdleTime += xIdleTaskHandles[ xCoreID ]->ulRunTimeCounter;
        }

        traceRETURN_ulTaskGetIdleRunTimeCounter( ulIdleTime );

        return ulIdleTime;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
8426 /*-----------------------------------------------------------*/
8427
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    /* Return the percentage of the total run time (across all cores) that
     * has been consumed by the idle task(s).  Returns zero if the scaled
     * total time is zero (too early to compute a percentage). */
    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
    {
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
        BaseType_t i;

        traceENTER_ulTaskGetIdleRunTimePercent();

        /* Cast the port counter value to configRUN_TIME_COUNTER_TYPE before
         * multiplying so the product cannot overflow in a narrower port
         * counter type - consistent with ulTaskGetRunTimePercent(). */
        ulTotalTime = ( ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE() ) * configNUMBER_OF_CORES;

        /* For percentage calculations. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        /* Avoid divide by zero errors. */
        if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            /* Sum the run time of each core's idle task. */
            for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
            {
                ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter;
            }

            ulReturn = ulRunTimeCounter / ulTotalTime;
        }
        else
        {
            ulReturn = 0;
        }

        traceRETURN_ulTaskGetIdleRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
8464 /*-----------------------------------------------------------*/
8465
8466 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
8467                                             const BaseType_t xCanBlockIndefinitely )
8468 {
8469     TickType_t xTimeToWake;
8470     const TickType_t xConstTickCount = xTickCount;
8471     List_t * const pxDelayedList = pxDelayedTaskList;
8472     List_t * const pxOverflowDelayedList = pxOverflowDelayedTaskList;
8473
8474     #if ( INCLUDE_xTaskAbortDelay == 1 )
8475     {
8476         /* About to enter a delayed list, so ensure the ucDelayAborted flag is
8477          * reset to pdFALSE so it can be detected as having been set to pdTRUE
8478          * when the task leaves the Blocked state. */
8479         pxCurrentTCB->ucDelayAborted = pdFALSE;
8480     }
8481     #endif
8482
8483     /* Remove the task from the ready list before adding it to the blocked list
8484      * as the same list item is used for both lists. */
8485     if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
8486     {
8487         /* The current task must be in a ready list, so there is no need to
8488          * check, and the port reset macro can be called directly. */
8489         portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority );
8490     }
8491     else
8492     {
8493         mtCOVERAGE_TEST_MARKER();
8494     }
8495
8496     #if ( INCLUDE_vTaskSuspend == 1 )
8497     {
8498         if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
8499         {
8500             /* Add the task to the suspended task list instead of a delayed task
8501              * list to ensure it is not woken by a timing event.  It will block
8502              * indefinitely. */
8503             listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
8504         }
8505         else
8506         {
8507             /* Calculate the time at which the task should be woken if the event
8508              * does not occur.  This may overflow but this doesn't matter, the
8509              * kernel will manage it correctly. */
8510             xTimeToWake = xConstTickCount + xTicksToWait;
8511
8512             /* The list item will be inserted in wake time order. */
8513             listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
8514
8515             if( xTimeToWake < xConstTickCount )
8516             {
8517                 /* Wake time has overflowed.  Place this item in the overflow
8518                  * list. */
8519                 traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
8520                 vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
8521             }
8522             else
8523             {
8524                 /* The wake time has not overflowed, so the current block list
8525                  * is used. */
8526                 traceMOVED_TASK_TO_DELAYED_LIST();
8527                 vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );
8528
8529                 /* If the task entering the blocked state was placed at the
8530                  * head of the list of blocked tasks then xNextTaskUnblockTime
8531                  * needs to be updated too. */
8532                 if( xTimeToWake < xNextTaskUnblockTime )
8533                 {
8534                     xNextTaskUnblockTime = xTimeToWake;
8535                 }
8536                 else
8537                 {
8538                     mtCOVERAGE_TEST_MARKER();
8539                 }
8540             }
8541         }
8542     }
8543     #else /* INCLUDE_vTaskSuspend */
8544     {
8545         /* Calculate the time at which the task should be woken if the event
8546          * does not occur.  This may overflow but this doesn't matter, the kernel
8547          * will manage it correctly. */
8548         xTimeToWake = xConstTickCount + xTicksToWait;
8549
8550         /* The list item will be inserted in wake time order. */
8551         listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
8552
8553         if( xTimeToWake < xConstTickCount )
8554         {
8555             traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
8556             /* Wake time has overflowed.  Place this item in the overflow list. */
8557             vListInsert( pxOverflowDelayedList, &( pxCurrentTCB->xStateListItem ) );
8558         }
8559         else
8560         {
8561             traceMOVED_TASK_TO_DELAYED_LIST();
8562             /* The wake time has not overflowed, so the current block list is used. */
8563             vListInsert( pxDelayedList, &( pxCurrentTCB->xStateListItem ) );
8564
8565             /* If the task entering the blocked state was placed at the head of the
8566              * list of blocked tasks then xNextTaskUnblockTime needs to be updated
8567              * too. */
8568             if( xTimeToWake < xNextTaskUnblockTime )
8569             {
8570                 xNextTaskUnblockTime = xTimeToWake;
8571             }
8572             else
8573             {
8574                 mtCOVERAGE_TEST_MARKER();
8575             }
8576         }
8577
8578         /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
8579         ( void ) xCanBlockIndefinitely;
8580     }
8581     #endif /* INCLUDE_vTaskSuspend */
8582 }
8583 /*-----------------------------------------------------------*/
8584
8585 #if ( portUSING_MPU_WRAPPERS == 1 )
8586
8587     xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
8588     {
8589         TCB_t * pxTCB;
8590
8591         traceENTER_xTaskGetMPUSettings( xTask );
8592
8593         pxTCB = prvGetTCBFromHandle( xTask );
8594
8595         traceRETURN_xTaskGetMPUSettings( &( pxTCB->xMPUSettings ) );
8596
8597         return &( pxTCB->xMPUSettings );
8598     }
8599
8600 #endif /* portUSING_MPU_WRAPPERS */
8601 /*-----------------------------------------------------------*/
8602
8603 /* Code below here allows additional code to be inserted into this source file,
8604  * especially where access to file scope functions and data is needed (for example
8605  * when performing module tests). */
8606
8607 #ifdef FREERTOS_MODULE_TEST
8608     #include "tasks_test_access_functions.h"
8609 #endif
8610
8611
8612 #if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
8613
8614     #include "freertos_tasks_c_additions.h"
8615
8616     #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        /* Thin wrapper that invokes the application-supplied
         * FREERTOS_TASKS_C_ADDITIONS_INIT() macro from within this translation
         * unit, so the injected code can reach this file's static (file scope)
         * functions and data.  The macro body is defined elsewhere
         * (freertos_tasks_c_additions.h / FreeRTOSConfig.h). */
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
8621     #endif
8622
8623 #endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
8624 /*-----------------------------------------------------------*/
8625
8626 #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
8627
8628 /*
8629  * This is the kernel provided implementation of vApplicationGetIdleTaskMemory()
8630  * to provide the memory that is used by the Idle task. It is used when
8631  * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetIdleTaskMemory by setting
8633  * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
8634  */
8635     void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
8636                                         StackType_t ** ppxIdleTaskStackBuffer,
8637                                         uint32_t * pulIdleTaskStackSize )
8638     {
8639         static StaticTask_t xIdleTaskTCB;
8640         static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
8641
8642         *ppxIdleTaskTCBBuffer = &( xIdleTaskTCB );
8643         *ppxIdleTaskStackBuffer = &( uxIdleTaskStack[ 0 ] );
8644         *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
8645     }
8646
8647     #if ( configNUMBER_OF_CORES > 1 )
8648
8649         void vApplicationGetPassiveIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
8650                                                    StackType_t ** ppxIdleTaskStackBuffer,
8651                                                    uint32_t * pulIdleTaskStackSize,
8652                                                    BaseType_t xPassiveIdleTaskIndex )
8653         {
8654             static StaticTask_t xIdleTaskTCBs[ configNUMBER_OF_CORES - 1 ];
8655             static StackType_t uxIdleTaskStacks[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ];
8656
8657             *ppxIdleTaskTCBBuffer = &( xIdleTaskTCBs[ xPassiveIdleTaskIndex ] );
8658             *ppxIdleTaskStackBuffer = &( uxIdleTaskStacks[ xPassiveIdleTaskIndex ][ 0 ] );
8659             *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
8660         }
8661
8662     #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
8663
8664 #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
8665 /*-----------------------------------------------------------*/
8666
8667 #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
8668
8669 /*
8670  * This is the kernel provided implementation of vApplicationGetTimerTaskMemory()
8671  * to provide the memory that is used by the Timer service task. It is used when
8672  * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetTimerTaskMemory by setting
8674  * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
8675  */
8676     void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
8677                                          StackType_t ** ppxTimerTaskStackBuffer,
8678                                          uint32_t * pulTimerTaskStackSize )
8679     {
8680         static StaticTask_t xTimerTaskTCB;
8681         static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];
8682
8683         *ppxTimerTaskTCBBuffer = &( xTimerTaskTCB );
8684         *ppxTimerTaskStackBuffer = &( uxTimerTaskStack[ 0 ] );
8685         *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
8686     }
8687
8688 #endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
8689 /*-----------------------------------------------------------*/