/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */
/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "stack_macros.h"
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
 * functions but without including stdio.h here. */
#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )

/* At the bottom of this file are two optional functions that can be used
 * to generate human readable text from the raw data generated by the
 * uxTaskGetSystemState() function.  Note the formatting functions are provided
 * for convenience only, and are NOT considered part of the kernel. */
    #include <stdio.h>
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 */
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )
    #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )
#else

    #if ( configNUMBER_OF_CORES == 1 )

/* This macro requests the running task pxTCB to yield.  In the single core
 * scheduler, a running task always runs on core 0 and portYIELD_WITHIN_API()
 * can be used to request the task running on core 0 to yield.  Therefore, pxTCB
 * is not used in this macro. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                         \
        ( void ) ( pxTCB );                                      \
        portYIELD_WITHIN_API();                                  \
    } while( 0 )

        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB ) \
    do {                                                        \
        if( pxCurrentTCB->uxPriority < ( pxTCB )->uxPriority )  \
        {                                                       \
            portYIELD_WITHIN_API();                             \
        }                                                       \
        else                                                    \
        {                                                       \
            mtCOVERAGE_TEST_MARKER();                           \
        }                                                       \
    } while( 0 )

    #else /* if ( configNUMBER_OF_CORES == 1 ) */

/* Yield the core on which this task is running. */
        #define taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB )    prvYieldCore( ( pxTCB )->xTaskRunState )

/* Yield for the task if a running task has priority lower than this task. */
        #define taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB )     prvYieldForTask( pxTCB )

    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#endif /* if ( configUSE_PREEMPTION == 0 ) */
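
/* Illustration (not kernel code): when an event unblocks a task, the kernel
 * invokes taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxUnblockedTCB ), which only
 * requests a context switch if a currently running task has a lower priority
 * than the unblocked task.  Under the cooperative scheduler both macros are
 * empty, so the unblocked task does not run until a running task yields
 * voluntarily with taskYIELD(). */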
/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE                        ( 0xa5U )
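
/* Worked example (illustrative numbers): if a task is created with a 512 byte
 * stack and, at some point, only the 64 bytes nearest the stack limit still
 * hold tskSTACK_FILL_BYTE, the task has used 448 bytes at its deepest point,
 * and its high water mark is 64 / sizeof( StackType_t ) words. */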
/* Bits used to record how a task's stack and TCB were allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif
/*
 * Macros used by vTaskList to indicate which state a task is in.
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to
 * be global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME    "IDLE"
#endif
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    do {                                            \
        if( ( uxPriority ) > uxTopReadyPriority )   \
        {                                           \
            uxTopReadyPriority = ( uxPriority );    \
        }                                           \
    } while( 0 ) /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

    #if ( configNUMBER_OF_CORES == 1 )
        #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                \
    do {                                                                                          \
        UBaseType_t uxTopPriority = uxTopReadyPriority;                                           \
                                                                                                  \
        /* Find the highest priority queue that contains ready tasks. */                          \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) )                     \
        {                                                                                         \
            configASSERT( uxTopPriority );                                                        \
            --uxTopPriority;                                                                      \
        }                                                                                         \
                                                                                                  \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of                  \
         * the same priority get an equal share of the processor time. */                         \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );     \
        uxTopReadyPriority = uxTopPriority;                                                       \
    } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */
    #else /* if ( configNUMBER_OF_CORES == 1 ) */

        #define taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID )    prvSelectHighestPriorityTask( xCoreID )

    #endif /* if ( configNUMBER_OF_CORES == 1 ) */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used. */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                  \
    do {                                                                                        \
        UBaseType_t uxTopPriority;                                                              \
                                                                                                \
        /* Find the highest priority list that contains ready tasks. */                         \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                          \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );   \
    } while( 0 )

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                     \
    do {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                              \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                        \
        }                                                                                              \
    } while( 0 )

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
/*-----------------------------------------------------------*/

/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows. */
#define taskSWITCH_DELAYED_LISTS()                                                \
    do {                                                                          \
        List_t * pxTemp;                                                          \
                                                                                  \
        /* The delayed tasks list should be empty when the lists are switched. */ \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );               \
                                                                                  \
        pxTemp = pxDelayedTaskList;                                               \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                            \
        pxOverflowDelayedTaskList = pxTemp;                                       \
        xNumOfOverflows++;                                                        \
        prvResetNextTaskUnblockTime();                                            \
    } while( 0 )

/*-----------------------------------------------------------*/

/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )                                                                     \
    do {                                                                                                   \
        traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                           \
        taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                                \
        listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
        tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB );                                                      \
    } while( 0 )
/*-----------------------------------------------------------*/
/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
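
/* For example, vTaskPrioritySet( NULL, uxNewPriority ) changes the priority of
 * the calling task, because prvGetTCBFromHandle() maps the NULL handle to
 * pxCurrentTCB. */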
/* The item value of the event list item is normally used to hold the priority
 * of the task to which it belongs (coded to allow it to be held in reverse
 * priority order).  However, it is occasionally borrowed for other purposes.  It
 * is important its value is not updated due to a task priority change while it is
 * being used for another purpose.  The following bit definition is used to inform
 * the scheduler that the value should not be changed - in which case it is the
 * responsibility of whichever module is using the value to ensure it gets set back
 * to its original value when it is released. */
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x8000U
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x80000000UL
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x8000000000000000ULL
#endif
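
/* Illustration of the "reverse priority order" coding mentioned above: the
 * kernel stores ( configMAX_PRIORITIES - uxPriority ) as the event list item
 * value, so event lists naturally sort with the highest priority task at the
 * front. */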
/* Indicates that the task is not actively running on any core. */
#define taskTASK_NOT_RUNNING           ( ( BaseType_t ) ( -1 ) )

/* Indicates that the task is actively running but scheduled to yield. */
#define taskTASK_SCHEDULED_TO_YIELD    ( ( BaseType_t ) ( -2 ) )

/* Returns pdTRUE if the task is actively running and not scheduled to yield. */
#if ( configNUMBER_OF_CORES == 1 )
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) )
#else
    #define taskTASK_IS_RUNNING( pxTCB )                          ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) )
    #define taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB )    ( ( ( pxTCB )->xTaskRunState != taskTASK_NOT_RUNNING ) ? ( pdTRUE ) : ( pdFALSE ) )
#endif

/* Indicates that the task is an Idle task. */
#define taskATTRIBUTE_IS_IDLE    ( UBaseType_t ) ( 1UL << 0UL )

#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) )
    #define portGET_CRITICAL_NESTING_COUNT()          ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting )
    #define portSET_CRITICAL_NESTING_COUNT( x )       ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) )
    #define portINCREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ )
    #define portDECREMENT_CRITICAL_NESTING_COUNT()    ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- )
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */
/* The following allows the infinite loop in the idle task function to be
 * controlled, for example when performing unit tests. */
#ifndef INFINITE_LOOP
    #define INFINITE_LOOP()    1
#endif
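
/* A test build could, for instance, override the macro in its configuration so
 * the idle loop terminates after a bounded number of iterations
 * (xLoopIterationsLeft is a hypothetical test counter):
 *     #define INFINITE_LOOP()    ( xLoopIterationsLeft-- > 0 )
 */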
#define taskBITS_PER_BYTE    ( ( size_t ) 8 )
/*
 * Task control block.  A task control block (TCB) is allocated for each task,
 * and stores task state information, including a pointer to the task's context
 * (the task's run time environment, including register values).
 */
typedef struct tskTaskControlBlock       /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
    #endif

    #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
        UBaseType_t uxCoreAffinityMask; /**< Used to link the task to certain cores.  UBaseType_t must have at least as many bits as configNUMBER_OF_CORES. */
    #endif

    ListItem_t xStateListItem;                  /**< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended). */
    ListItem_t xEventListItem;                  /**< Used to reference a task from an event list. */
    UBaseType_t uxPriority;                     /**< The priority of the task.  0 is the lowest priority. */
    StackType_t * pxStack;                      /**< Points to the start of the stack. */

    #if ( configNUMBER_OF_CORES > 1 )
        volatile BaseType_t xTaskRunState;      /**< Used to identify the core the task is running on, if the task is running.  Otherwise, identifies the task's state - not running or yielding. */
        UBaseType_t uxTaskAttributes;           /**< Task's attributes - currently used to identify the idle tasks. */
    #endif

    char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created.  Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
        BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */
    #endif

    #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
        StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
    #endif

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxTCBNumber;  /**< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
        UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
    #endif

    #if ( configUSE_MUTEXES == 1 )
        UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
        UBaseType_t uxMutexesHeld;
    #endif

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        TaskHookFunction_t pxTaskTag;
    #endif

    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
        void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
    #endif

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
    #endif

    #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
        volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
        volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
    #endif

    /* See the comments in FreeRTOS.h with the definition of
     * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
    #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        uint8_t ucDelayAborted;
    #endif

    #if ( configUSE_POSIX_ERRNO == 1 )
        int iTaskErrno;
    #endif
} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
 * below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;
/*lint -save -e956 A manual analysis and inspection has been used to determine
 * which static variables must be declared volatile. */
#if ( configNUMBER_OF_CORES == 1 )
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
#else
    /* MISRA Ref 8.4.1 [Declaration shall be visible] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
    /* coverity[misra_c_2012_rule_8_4_violation] */
    portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ];
    #define pxCurrentTCB    xTaskGetCurrentTaskHandle()
#endif
/* Lists for ready and blocked tasks. --------------------
 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
 * the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /**< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /**< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList;                         /**< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if ( INCLUDE_vTaskDelete == 1 )

    PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;

#endif

#if ( INCLUDE_vTaskSuspend == 1 )

    PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */

#endif
/* Global POSIX errno.  Its value is changed upon context switching to match
 * the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE };
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ];       /**< Holds the handles of the idle tasks.  The idle tasks are created automatically when the scheduler is started. */
/* Improve support for OpenOCD.  The kernel tracks Ready tasks via priority lists.
 * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
 * to determine the number of priority lists to read back from the remote target. */
const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;
/* Context switches are held pending while the scheduler is suspended.  Also,
 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
 * If an interrupt needs to unblock a task while the scheduler is suspended then it
 * moves the task's event list item into the xPendingReadyList, ready for the
 * kernel to move the task from the pending ready list into the real ready list
 * when the scheduler is unsuspended.  The pending ready list itself can only be
 * accessed from a critical section.
 *
 * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock
 * and must not be done from an ISR.  Reads must be protected by either lock and may be done
 * from either an ISR or a task. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;
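
/* Illustrative application-side pattern (not kernel code): the deferral
 * described above is what makes the following snippet safe - the list walk
 * cannot be switched out, yet interrupts remain enabled, and any task an
 * interrupt readies is parked on xPendingReadyList until xTaskResumeAll()
 * runs (prvWalkSharedList is a hypothetical application function):
 *
 *     vTaskSuspendAll();
 *     {
 *         prvWalkSharedList();
 *     }
 *     ( void ) xTaskResumeAll();
 */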
#if ( configGENERATE_RUN_TIME_STATS == 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
    PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U };    /**< Holds the value of a timer/counter the last time a task was switched in. */
    PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */

#endif

#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
    static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ];
    static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ];

#endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/

/* File private functions. --------------------------------*/

/*
 * Creates the idle tasks during scheduler start.
 */
static BaseType_t prvCreateIdleTasks( void );

#if ( configNUMBER_OF_CORES > 1 )

/*
 * Checks to see if another task moved the current task out of the ready
 * list while it was waiting to enter a critical section and yields, if so.
 */
    static void prvCheckForRunStateChange( void );
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */

#if ( configNUMBER_OF_CORES > 1 )

/*
 * Yields the given core.
 */
    static void prvYieldCore( BaseType_t xCoreID );
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */

#if ( configNUMBER_OF_CORES > 1 )

/*
 * Yields a core, or cores if multiple priorities are not allowed to run
 * simultaneously, to allow the task pxTCB to run.
 */
    static void prvYieldForTask( const TCB_t * pxTCB );
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */

#if ( configNUMBER_OF_CORES > 1 )

/*
 * Selects the highest priority available task for the given core.
 */
    static void prvSelectHighestPriorityTask( BaseType_t xCoreID );
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*
 * Utility function that simply returns pdTRUE if the task referenced by xTask is
 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
 * is in any other state.
 */
#if ( INCLUDE_vTaskSuspend == 1 )

    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

#endif /* INCLUDE_vTaskSuspend */

/*
 * Utility to ready all the lists used by the scheduler.  This is called
 * automatically upon the creation of the first task.
 */
static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
/*
 * The idle task, which, like all tasks, is implemented as a never ending loop.
 * The idle task is automatically created and added to the ready lists upon
 * creation of the first user task.
 *
 * In FreeRTOS SMP, configNUMBER_OF_CORES - 1 minimal idle tasks are also
 * created to ensure that each core has an idle task to run when no other
 * task is available to run.
 *
 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
 * language extensions.  The equivalent prototypes for these functions are:
 *
 * void prvIdleTask( void *pvParameters );
 * void prvMinimalIdleTask( void *pvParameters );
 */
static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
#if ( configNUMBER_OF_CORES > 1 )
    static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
#endif
/*
 * Utility to free all memory allocated by the scheduler to hold a TCB,
 * including the stack pointed to by the TCB.
 *
 * This does not free memory allocated by the task itself (i.e. memory
 * allocated by calls to pvPortMalloc from within the tasks application code).
 */
#if ( INCLUDE_vTaskDelete == 1 )

    static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;

#endif

/*
 * Used only by the idle task.  This checks to see if anything has been placed
 * in the list of tasks waiting to be deleted.  If so the task is cleaned up
 * and its TCB deleted.
 */
static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
/*
 * The currently executing task is entering the Blocked state.  Add the task to
 * either the current or the overflow delayed task list.
 */
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
                                            const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;

/*
 * Fills a TaskStatus_t structure with information on each task that is
 * referenced from the pxList list (which may be a ready list, a delayed list,
 * a suspended list, etc.).
 *
 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
 * NORMAL APPLICATION CODE.
 */
#if ( configUSE_TRACE_FACILITY == 1 )

    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState ) PRIVILEGED_FUNCTION;

#endif
/*
 * Searches pxList for a task with name pcNameToQuery - returning a handle to
 * the task if it is found, or NULL if the task is not found.
 */
#if ( INCLUDE_xTaskGetHandle == 1 )

    static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
                                                     const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;

#endif

/*
 * When a task is created, the stack of the task is filled with a known value.
 * This function determines the 'high water mark' of the task stack by
 * determining how much of the stack remains at the original preset value.
 */
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )

    static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;

#endif
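
/* A minimal sketch of the measurement, assuming a downward-growing stack
 * ( portSTACK_GROWTH < 0 ); the real implementation appears later in this
 * file.  Starting from the stack limit, count bytes that still hold the fill
 * value, then convert the byte count to words:
 *
 *     uint32_t ulCount = 0U;
 *     while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
 *     {
 *         pucStackByte -= portSTACK_GROWTH;
 *         ulCount++;
 *     }
 *     return ( configSTACK_DEPTH_TYPE ) ( ulCount / sizeof( StackType_t ) );
 */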
/*
 * Return the amount of time, in ticks, that will pass before the kernel will
 * next move a task from the Blocked state to the Running state.
 *
 * This conditional compilation should use inequality to 0, not equality to 1.
 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
 * set to a value other than 1.
 */
#if ( configUSE_TICKLESS_IDLE != 0 )

    static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;

#endif

/*
 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
 * will exit the Blocked state.
 */
static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
#if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )

/*
 * Helper function used to pad task names with spaces when printing out
 * human readable tables of task information.
 */
    static char * prvWriteNameToBuffer( char * pcBuffer,
                                        const char * pcTaskName ) PRIVILEGED_FUNCTION;

#endif

/*
 * Called after a TCB_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                  const uint32_t ulStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask,
                                  TCB_t * pxNewTCB,
                                  const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;

/*
 * Called after a new task has been created and initialised to place the task
 * under the control of the scheduler.
 */
static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
/*
 * freertos_tasks_c_additions_init() should only be called if the user definable
 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
 * called by the function.
 */
#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT

    static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;

#endif

#if ( configUSE_MINIMAL_IDLE_HOOK == 1 )
    extern void vApplicationMinimalIdleHook( void );
#endif /* #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) */
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

/*
 * Convert the snprintf return value to the number of characters
 * written.  The following are the possible cases:
 *
 * 1. The buffer supplied to snprintf is large enough to hold the
 *    generated string.  The return value in this case is the number
 *    of characters actually written, not counting the terminating
 *    null character.
 * 2. The buffer supplied to snprintf is NOT large enough to hold
 *    the generated string.  The return value in this case is the
 *    number of characters that would have been written if the
 *    buffer had been sufficiently large, not counting the
 *    terminating null character.
 * 3. Encoding error.  The return value in this case is a negative
 *    number.
 *
 * From 1 and 2 above ==> Only when the return value is non-negative
 * and less than the supplied buffer length, the string has been
 * completely written.
 */
    static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
                                                        size_t n );

#endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
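
/* The three cases above reduce to a small clamp.  A sketch of the conversion,
 * assuming a buffer of n characters (the real function is defined further
 * down in this file):
 *
 *     if( iSnprintfReturnValue < 0 )
 *     {
 *         uxCharsWritten = 0U;                        // Case 3: encoding error.
 *     }
 *     else if( ( size_t ) iSnprintfReturnValue >= n )
 *     {
 *         uxCharsWritten = n - 1U;                    // Case 2: output truncated.
 *     }
 *     else
 *     {
 *         uxCharsWritten = ( size_t ) iSnprintfReturnValue; // Case 1.
 *     }
 */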
/*-----------------------------------------------------------*/

#if ( configNUMBER_OF_CORES > 1 )
    static void prvCheckForRunStateChange( void )
    {
        UBaseType_t uxPrevCriticalNesting;
        const TCB_t * pxThisTCB;

        /* This must only be called from within a task. */
        portASSERT_IF_IN_ISR();

        /* This function is always called with interrupts disabled
         * so this is safe. */
        pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ];

        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
        {
            /* We are only here if we just entered a critical section
             * or if we just suspended the scheduler, and another task
             * has requested that we yield.
             *
             * This is slightly complicated since we need to save and restore
             * the suspension and critical nesting counts, as well as release
             * and reacquire the correct locks.  And then, do it all over again
             * if our state changed again during the reacquisition. */
            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT();

            if( uxPrevCriticalNesting > 0U )
            {
                portSET_CRITICAL_NESTING_COUNT( 0U );
                portRELEASE_ISR_LOCK();
            }
            else
            {
                /* The scheduler is suspended.  uxSchedulerSuspended is updated
                 * only when the task is not requested to yield. */
                mtCOVERAGE_TEST_MARKER();
            }

            portRELEASE_TASK_LOCK();
            portMEMORY_BARRIER();
            configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD );

            portENABLE_INTERRUPTS();

            /* Enabling interrupts should cause this core to immediately
             * service the pending interrupt and yield.  If the run state is still
             * yielding here then that is a problem. */
            configASSERT( pxThisTCB->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD );

            portDISABLE_INTERRUPTS();
            portGET_TASK_LOCK();
            portGET_ISR_LOCK();

            portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting );

            if( uxPrevCriticalNesting == 0U )
            {
                portRELEASE_ISR_LOCK();
            }
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/

#if ( configNUMBER_OF_CORES > 1 )
    static void prvYieldCore( BaseType_t xCoreID )
    {
        /* This must be called from a critical section and xCoreID must be valid. */
        if( ( portCHECK_IF_IN_ISR() == pdTRUE ) && ( xCoreID == ( BaseType_t ) portGET_CORE_ID() ) )
        {
            xYieldPendings[ xCoreID ] = pdTRUE;
        }
        else
        {
            if( pxCurrentTCBs[ xCoreID ]->xTaskRunState != taskTASK_SCHEDULED_TO_YIELD )
            {
                if( xCoreID == ( BaseType_t ) portGET_CORE_ID() )
                {
                    xYieldPendings[ xCoreID ] = pdTRUE;
                }
                else
                {
                    portYIELD_CORE( xCoreID );
                    pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_SCHEDULED_TO_YIELD;
                }
            }
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
    static void prvYieldForTask( const TCB_t * pxTCB )
    {
        BaseType_t xLowestPriorityToPreempt;
        BaseType_t xCurrentCoreTaskPriority;
        BaseType_t xLowestPriorityCore = ( BaseType_t ) -1;
        BaseType_t xCoreID;

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xYieldCount = 0;
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        /* This must be called from a critical section. */
        configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )

            /* No task should yield for this one if it is a lower priority
             * than the priority level of the currently ready tasks. */
            if( pxTCB->uxPriority >= uxTopReadyPriority )
        #else
            /* Yield is not required for a task which is already running. */
            if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
        #endif
        {
            xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority;

            /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB
             * is 0.  This is ok as we will give system idle tasks a priority of -1 below. */
            --xLowestPriorityToPreempt;

            for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
            {
                xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority;

                /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here. */
                if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                {
                    xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1;
                }

                if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) )
                {
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                        if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE )
                    #endif
                    {
                        if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt )
                        {
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                            #endif
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                                    xLowestPriorityCore = xCoreID;
                                }
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* Yield all currently running non-idle tasks with a priority lower than
                         * the task that needs to run. */
                        if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) &&
                            ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) )
                        {
                            prvYieldCore( xCoreID );
                            xYieldCount++;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) )
            #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
                if( xLowestPriorityCore >= 0 )
            #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
            {
                prvYieldCore( xLowestPriorityCore );
            }

            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                /* Verify that the calling core always yields to higher priority tasks. */
                if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 ) &&
                    ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) )
                {
                    configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) ||
                                  ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) );
                }
            #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
        }
    }
#endif /* #if ( configNUMBER_OF_CORES > 1 ) */
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES > 1 )
    static void prvSelectHighestPriorityTask( BaseType_t xCoreID )
    {
        UBaseType_t uxCurrentPriority = uxTopReadyPriority;
        BaseType_t xTaskScheduled = pdFALSE;
        BaseType_t xDecrementTopPriority = pdTRUE;

        #if ( configUSE_CORE_AFFINITY == 1 )
            const TCB_t * pxPreviousTCB = NULL;
        #endif
        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            BaseType_t xPriorityDropped = pdFALSE;
        #endif

        /* This function should be called when the scheduler is running. */
        configASSERT( xSchedulerRunning == pdTRUE );

        /* A new task is created and a running task with the same priority yields
         * itself to run the new task.  When a running task yields itself, it is still
         * in the ready list.  This running task will be selected before the new task
         * since the new task is always added to the end of the ready list.
         * A second problem is that the running task keeps its position in the ready
         * list when it yields itself, so it can be selected earlier than other tasks
         * that have waited longer.
         *
         * To fix both problems, the running task is moved to the end of the ready
         * list before the ready list is searched. */
        if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                                     &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE )
        {
            ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem );
            vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ),
                            &pxCurrentTCBs[ xCoreID ]->xStateListItem );
        }

        while( xTaskScheduled == pdFALSE )
        {
            #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
            {
                if( uxCurrentPriority < uxTopReadyPriority )
                {
                    /* We can't schedule any tasks, other than idle, that have a
                     * priority lower than the priority of a task currently running
                     * on another core. */
                    uxCurrentPriority = tskIDLE_PRIORITY;
                }
            }
            #endif

            if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE )
            {
                const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] );
                const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList );
                ListItem_t * pxIterator;

                /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority
                 * must not be decremented any further. */
                xDecrementTopPriority = pdFALSE;

                for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
                {
                    TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator );

                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        /* When falling back to the idle priority because only one priority
                         * level is allowed to run at a time, we should ONLY schedule the true
                         * idle tasks, not user tasks at the idle priority. */
                        if( uxCurrentPriority < uxTopReadyPriority )
                        {
                            if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 )
                            {
                                continue;
                            }
                        }
                    }
                    #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

                    if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
                    {
                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* If the task is not being executed by any core swap it in. */
                            pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
                            #if ( configUSE_CORE_AFFINITY == 1 )
                                pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
                            #endif
                            pxTCB->xTaskRunState = xCoreID;
                            pxCurrentTCBs[ xCoreID ] = pxTCB;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else if( pxTCB == pxCurrentTCBs[ xCoreID ] )
                    {
                        configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) );

                        #if ( configUSE_CORE_AFFINITY == 1 )
                            if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                        #endif
                        {
                            /* The task is already running on this core, mark it as scheduled. */
                            pxTCB->xTaskRunState = xCoreID;
                            xTaskScheduled = pdTRUE;
                        }
                    }
                    else
                    {
                        /* This task is running on a core other than xCoreID. */
                        mtCOVERAGE_TEST_MARKER();
                    }

                    if( xTaskScheduled != pdFALSE )
                    {
                        /* A task has been selected to run on this core. */
                        break;
                    }
                }
            }
            else
            {
                if( xDecrementTopPriority != pdFALSE )
                {
                    uxTopReadyPriority--;
                    #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
                    {
                        xPriorityDropped = pdTRUE;
                    }
                    #endif
                }
            }

            /* There are configNUMBER_OF_CORES Idle tasks created when the scheduler
             * is started.  The scheduler should therefore be able to select a task to
             * run when uxCurrentPriority is tskIDLE_PRIORITY.  uxCurrentPriority is
             * never decreased below tskIDLE_PRIORITY. */
            if( uxCurrentPriority > tskIDLE_PRIORITY )
            {
                uxCurrentPriority--;
            }
            else
            {
                /* This path is only taken if the idle tasks have not yet been
                 * created.  Break the loop to prevent uxCurrentPriority from
                 * underflowing. */
                break;
            }
        }

        #if ( configRUN_MULTIPLE_PRIORITIES == 0 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( xPriorityDropped != pdFALSE )
                {
                    /* There may be several ready tasks that were being prevented from running because there was
                     * a higher priority task running.  Now that the last of the higher priority tasks is no longer
                     * running, make sure all the other idle tasks yield. */
                    BaseType_t x;

                    for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ )
                    {
                        if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 )
                        {
                            prvYieldCore( x );
                        }
                    }
                }
            }
        }
        #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */

        #if ( configUSE_CORE_AFFINITY == 1 )
        {
            if( xTaskScheduled == pdTRUE )
            {
                if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) )
                {
                    /* A ready task was just evicted from this core.  See if it can be
                     * scheduled on any other core. */
                    UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask;
                    BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority;
                    BaseType_t xLowestPriorityCore = -1;
                    BaseType_t x;

                    if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                    {
                        xLowestPriority = xLowestPriority - 1;
                    }

                    if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                    {
                        /* The ready task that was removed from this core is not excluded from it.
                         * Only look at the intersection of the cores the removed task is allowed to run
                         * on with the cores that the new task is excluded from.  It is possible that the
                         * new task was only placed onto this core because it is excluded from another.
                         * Check to see if the previous task could run on one of those cores. */
                        uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask );
                    }
                    else
                    {
                        /* The ready task that was removed from this core is excluded from it. */
                    }

                    uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U );

                    for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- )
                    {
                        UBaseType_t uxCore = ( UBaseType_t ) x;
                        BaseType_t xTaskPriority;

                        if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U )
                        {
                            xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority;

                            if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
                            {
                                xTaskPriority = xTaskPriority - ( BaseType_t ) 1;
                            }

                            uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore );

                            if( ( xTaskPriority < xLowestPriority ) &&
                                ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) &&
                                ( xYieldPendings[ uxCore ] == pdFALSE ) )
                            {
                                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                                    if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE )
                                #endif
                                {
                                    xLowestPriority = xTaskPriority;
                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
                                }
                            }
                        }
                    }

                    if( xLowestPriorityCore >= 0 )
                    {
                        prvYieldCore( xLowestPriorityCore );
                    }
                }
            }
        }
        #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */
    }
#endif /* ( configNUMBER_OF_CORES > 1 ) */

/*-----------------------------------------------------------*/
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                    const uint32_t ulStackDepth,
                                    void * const pvParameters,
                                    UBaseType_t uxPriority,
                                    StackType_t * const puxStackBuffer,
                                    StaticTask_t * const pxTaskBuffer )
    #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
    {
        return xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY );
    }

    TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode,
                                               const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                               const uint32_t ulStackDepth,
                                               void * const pvParameters,
                                               UBaseType_t uxPriority,
                                               StackType_t * const puxStackBuffer,
                                               StaticTask_t * const pxTaskBuffer,
                                               UBaseType_t uxCoreAffinityMask )
    #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
    {
        TCB_t * pxNewTCB;
        TaskHandle_t xReturn;

        traceENTER_xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer );

        configASSERT( puxStackBuffer != NULL );
        configASSERT( pxTaskBuffer != NULL );

        #if ( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
             * variable of type StaticTask_t equals the size of the real task
             * structure. */
            volatile size_t xSize = sizeof( StaticTask_t );
            configASSERT( xSize == sizeof( TCB_t ) );
            ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
        }
        #endif /* configASSERT_DEFINED */

        if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
        {
            /* The memory used for the task's TCB and stack are passed into this
             * function - use them. */
            pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
            ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
            pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );

            #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
            {
                /* Set the task's affinity before scheduling it. */
                pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
            }
            #endif

            prvAddNewTaskToReadyList( pxNewTCB );
        }
        else
        {
            xReturn = NULL;
        }

        traceRETURN_xTaskCreateStatic( xReturn );

        return xReturn;
    }

#endif /* SUPPORT_STATIC_ALLOCATION */
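
/* Illustrative application-side usage of xTaskCreateStatic() (hypothetical
 * names, buffer sizes chosen for the example only):
 *
 *     static StaticTask_t xTaskTCB;
 *     static StackType_t uxTaskStack[ configMINIMAL_STACK_SIZE ];
 *
 *     TaskHandle_t xHandle = xTaskCreateStatic( vMyTaskFunction,
 *                                               "MyTask",
 *                                               configMINIMAL_STACK_SIZE,
 *                                               NULL,
 *                                               tskIDLE_PRIORITY + 1,
 *                                               uxTaskStack,
 *                                               &xTaskTCB );
 *
 * Because both buffers are supplied by the caller, no heap allocation can
 * fail, so xHandle is non-NULL whenever the parameters are valid. */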
/*-----------------------------------------------------------*/

#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
                                            TaskHandle_t * pxCreatedTask )
    #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
    {
        return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask );
    }

    BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition,
                                                       UBaseType_t uxCoreAffinityMask,
                                                       TaskHandle_t * pxCreatedTask )
    #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
    {
        TCB_t * pxNewTCB;
        BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

        traceENTER_xTaskCreateRestrictedStatic( pxTaskDefinition, pxCreatedTask );

        configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
        configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

        if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends
             * on the implementation of the port malloc function and whether or
             * not static allocation is being used. */
            pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
            ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

            /* Store the stack location in the TCB. */
            pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
                                  pxTaskDefinition->pcName,
                                  ( uint32_t ) pxTaskDefinition->usStackDepth,
                                  pxTaskDefinition->pvParameters,
                                  pxTaskDefinition->uxPriority,
                                  pxCreatedTask, pxNewTCB,
                                  pxTaskDefinition->xRegions );

            #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
            {
                /* Set the task's affinity before scheduling it. */
                pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
            }
            #endif

            prvAddNewTaskToReadyList( pxNewTCB );
            xReturn = pdPASS;
        }

        traceRETURN_xTaskCreateRestrictedStatic( xReturn );

        return xReturn;
    }

#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
/*-----------------------------------------------------------*/

#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
                                      TaskHandle_t * pxCreatedTask )
    #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
    {
        return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask );
    }

    BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition,
                                                 UBaseType_t uxCoreAffinityMask,
                                                 TaskHandle_t * pxCreatedTask )
    #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
    {
        TCB_t * pxNewTCB;
        BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

        traceENTER_xTaskCreateRestricted( pxTaskDefinition, pxCreatedTask );

        configASSERT( pxTaskDefinition->puxStackBuffer );

        if( pxTaskDefinition->puxStackBuffer != NULL )
        {
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                /* Store the stack location in the TCB. */
                pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

                #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
                {
                    /* Tasks can be created statically or dynamically, so note
                     * this task had a statically allocated stack in case it is
                     * later deleted.  The TCB was allocated dynamically. */
                    pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
                }
                #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

                prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
                                      pxTaskDefinition->pcName,
                                      ( uint32_t ) pxTaskDefinition->usStackDepth,
                                      pxTaskDefinition->pvParameters,
                                      pxTaskDefinition->uxPriority,
                                      pxCreatedTask, pxNewTCB,
                                      pxTaskDefinition->xRegions );

                #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
                {
                    /* Set the task's affinity before scheduling it. */
                    pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
                }
                #endif

                prvAddNewTaskToReadyList( pxNewTCB );
                xReturn = pdPASS;
            }
        }

        traceRETURN_xTaskCreateRestricted( xReturn );

        return xReturn;
    }

#endif /* portUSING_MPU_WRAPPERS */
1424 /*-----------------------------------------------------------*/
1426 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
1428 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
1429 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
1430 const configSTACK_DEPTH_TYPE usStackDepth,
1431 void * const pvParameters,
1432 UBaseType_t uxPriority,
1433 TaskHandle_t * const pxCreatedTask )
1434 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1436 return xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask );
1439 BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode,
1440 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
1441 const configSTACK_DEPTH_TYPE usStackDepth,
1442 void * const pvParameters,
1443 UBaseType_t uxPriority,
1444 UBaseType_t uxCoreAffinityMask,
1445 TaskHandle_t * const pxCreatedTask )
1446 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
1451 traceENTER_xTaskCreate( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask );
1453 /* If the stack grows down then allocate the stack then the TCB so the stack
1454 * does not grow into the TCB. Likewise if the stack grows up then allocate
1455 * the TCB then the stack. */
1456 #if ( portSTACK_GROWTH > 0 )
1458 /* Allocate space for the TCB. Where the memory comes from depends on
1459 * the implementation of the port malloc function and whether or not static
1460 * allocation is being used. */
1461 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
1463 if( pxNewTCB != NULL )
1465 ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1467 /* Allocate space for the stack used by the task being created.
1468 * The base of the stack memory stored in the TCB so the task can
1469 * be deleted later if required. */
1470 pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1472 if( pxNewTCB->pxStack == NULL )
1474 /* Could not allocate the stack. Delete the allocated TCB. */
1475 vPortFree( pxNewTCB );
1480 #else /* portSTACK_GROWTH */
1482 StackType_t * pxStack;
1484 /* Allocate space for the stack used by the task being created. */
1485 pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
1487 if( pxStack != NULL )
1489 /* Allocate space for the TCB. */
1490 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
1492 if( pxNewTCB != NULL )
1494 ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
1496 /* Store the stack location in the TCB. */
1497 pxNewTCB->pxStack = pxStack;
/* The stack cannot be used as the TCB was not created. Free
 * it again. */
vPortFreeStack( pxStack );
1511 #endif /* portSTACK_GROWTH */
1513 if( pxNewTCB != NULL )
1515 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
1517 /* Tasks can be created statically or dynamically, so note this
1518 * task was created dynamically in case it is later deleted. */
1519 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
1521 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
1523 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
1525 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1527 /* Set the task's affinity before scheduling it. */
1528 pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask;
1532 prvAddNewTaskToReadyList( pxNewTCB );
1537 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
1540 traceRETURN_xTaskCreate( xReturn );
1545 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
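/* Example usage of xTaskCreate() (an illustrative sketch, not part of the
 * kernel - the task function and names used here are hypothetical):
 *
 * void vTaskCode( void * pvParameters )
 * {
 *     for( ;; )
 *     {
 *         // Task application code here.
 *     }
 * }
 *
 * void vOtherFunction( void )
 * {
 *     TaskHandle_t xHandle = NULL;
 *
 *     // Create the task, storing the handle.
 *     if( xTaskCreate( vTaskCode, "NAME", configMINIMAL_STACK_SIZE, NULL,
 *                      tskIDLE_PRIORITY + 1, &xHandle ) == pdPASS )
 *     {
 *         // The task was created; xHandle can now be used to operate on
 *         // the task, for example vTaskDelete( xHandle ).
 *     }
 * }
 */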
1546 /*-----------------------------------------------------------*/
1548 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
1549 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
1550 const uint32_t ulStackDepth,
1551 void * const pvParameters,
1552 UBaseType_t uxPriority,
1553 TaskHandle_t * const pxCreatedTask,
1555 const MemoryRegion_t * const xRegions )
1557 StackType_t * pxTopOfStack;
1560 #if ( portUSING_MPU_WRAPPERS == 1 )
1561 /* Should the task be created in privileged mode? */
1562 BaseType_t xRunPrivileged;
1564 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
1566 xRunPrivileged = pdTRUE;
1570 xRunPrivileged = pdFALSE;
1572 uxPriority &= ~portPRIVILEGE_BIT;
1573 #endif /* portUSING_MPU_WRAPPERS == 1 */
1575 /* Avoid dependency on memset() if it is not required. */
1576 #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
1578 /* Fill the stack with a known value to assist debugging. */
1579 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
1581 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
/* Calculate the top of stack address. This depends on whether the stack
 * grows from high memory to low (as per the 80x86) or vice versa.
 * portSTACK_GROWTH is used to make the result positive or negative as
 * required by the port. */
1587 #if ( portSTACK_GROWTH < 0 )
1589 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
1590 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
1592 /* Check the alignment of the calculated top of stack is correct. */
1593 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
1595 #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
/* Also record the stack's high address, which may assist
 * debugging. */
1599 pxNewTCB->pxEndOfStack = pxTopOfStack;
1601 #endif /* configRECORD_STACK_HIGH_ADDRESS */
1603 #else /* portSTACK_GROWTH */
1605 pxTopOfStack = pxNewTCB->pxStack;
1606 pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + portBYTE_ALIGNMENT_MASK ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
1608 /* Check the alignment of the calculated top of stack is correct. */
1609 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
/* The other extreme of the stack space is required if stack checking is
 * performed. */
1613 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
1615 #endif /* portSTACK_GROWTH */
1617 /* Store the task name in the TCB. */
1618 if( pcName != NULL )
1620 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
1622 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
1624 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
1625 * configMAX_TASK_NAME_LEN characters just in case the memory after the
1626 * string is not accessible (extremely unlikely). */
1627 if( pcName[ x ] == ( char ) 0x00 )
1633 mtCOVERAGE_TEST_MARKER();
/* Ensure the name string is terminated in the case that the string length
 * was greater than or equal to configMAX_TASK_NAME_LEN. */
1639 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
1643 mtCOVERAGE_TEST_MARKER();
1646 /* This is used as an array index so must ensure it's not too large. */
1647 configASSERT( uxPriority < configMAX_PRIORITIES );
1649 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1651 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1655 mtCOVERAGE_TEST_MARKER();
1658 pxNewTCB->uxPriority = uxPriority;
1659 #if ( configUSE_MUTEXES == 1 )
1661 pxNewTCB->uxBasePriority = uxPriority;
1663 #endif /* configUSE_MUTEXES */
1665 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
1666 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
1668 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
1669 * back to the containing TCB from a generic item in a list. */
1670 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
1672 /* Event lists are always in priority order. */
1673 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1674 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
1676 #if ( portUSING_MPU_WRAPPERS == 1 )
1678 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1682 /* Avoid compiler warning about unreferenced parameter. */
1687 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
1689 /* Allocate and initialize memory for the task's TLS Block. */
1690 configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
1694 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
1696 pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY;
1700 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
1702 pxNewTCB->xPreemptionDisable = 0;
1706 /* Initialize the TCB stack to look as if the task was already running,
1707 * but had been interrupted by the scheduler. The return address is set
1708 * to the start of the task function. Once the stack has been initialised
1709 * the top of stack variable is updated. */
1710 #if ( portUSING_MPU_WRAPPERS == 1 )
1712 /* If the port has capability to detect stack overflow,
1713 * pass the stack end address to the stack initialization
1714 * function as well. */
1715 #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1717 #if ( portSTACK_GROWTH < 0 )
1719 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1721 #else /* portSTACK_GROWTH */
1723 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1725 #endif /* portSTACK_GROWTH */
1727 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1729 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
1731 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1733 #else /* portUSING_MPU_WRAPPERS */
1735 /* If the port has capability to detect stack overflow,
1736 * pass the stack end address to the stack initialization
1737 * function as well. */
1738 #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1740 #if ( portSTACK_GROWTH < 0 )
1742 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1744 #else /* portSTACK_GROWTH */
1746 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1748 #endif /* portSTACK_GROWTH */
1750 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1752 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1754 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1756 #endif /* portUSING_MPU_WRAPPERS */
1758 /* Initialize task state and task attributes. */
1759 #if ( configNUMBER_OF_CORES > 1 )
1761 pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING;
1763 /* Is this an idle task? */
1764 if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvMinimalIdleTask ) )
1766 pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE;
1769 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
1771 if( pxCreatedTask != NULL )
1773 /* Pass the handle out in an anonymous way. The handle can be used to
1774 * change the created task's priority, delete the created task, etc.*/
1775 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1779 mtCOVERAGE_TEST_MARKER();
1782 /*-----------------------------------------------------------*/
1784 #if ( configNUMBER_OF_CORES == 1 )
1786 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
/* Ensure interrupts don't access the task lists while the lists are
 * being updated. */
1790 taskENTER_CRITICAL();
1792 uxCurrentNumberOfTasks++;
1794 if( pxCurrentTCB == NULL )
1796 /* There are no other tasks, or all the other tasks are in
1797 * the suspended state - make this the current task. */
1798 pxCurrentTCB = pxNewTCB;
1800 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1802 /* This is the first task to be created so do the preliminary
1803 * initialisation required. We will not recover if this call
1804 * fails, but we will report the failure. */
1805 prvInitialiseTaskLists();
1809 mtCOVERAGE_TEST_MARKER();
/* If the scheduler is not already running, make this task the
 * current task if it is the highest priority task to be created
 * so far. */
1817 if( xSchedulerRunning == pdFALSE )
1819 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
1821 pxCurrentTCB = pxNewTCB;
1825 mtCOVERAGE_TEST_MARKER();
1830 mtCOVERAGE_TEST_MARKER();
1836 #if ( configUSE_TRACE_FACILITY == 1 )
1838 /* Add a counter into the TCB for tracing only. */
1839 pxNewTCB->uxTCBNumber = uxTaskNumber;
1841 #endif /* configUSE_TRACE_FACILITY */
1842 traceTASK_CREATE( pxNewTCB );
1844 prvAddTaskToReadyList( pxNewTCB );
1846 portSETUP_TCB( pxNewTCB );
1848 taskEXIT_CRITICAL();
1850 if( xSchedulerRunning != pdFALSE )
1852 /* If the created task is of a higher priority than the current task
1853 * then it should run now. */
1854 taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
1858 mtCOVERAGE_TEST_MARKER();
1862 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
1864 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
/* Ensure interrupts don't access the task lists while the lists are
 * being updated. */
1868 taskENTER_CRITICAL();
1870 uxCurrentNumberOfTasks++;
1872 if( xSchedulerRunning == pdFALSE )
1874 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1876 /* This is the first task to be created so do the preliminary
1877 * initialisation required. We will not recover if this call
1878 * fails, but we will report the failure. */
1879 prvInitialiseTaskLists();
1883 mtCOVERAGE_TEST_MARKER();
1886 if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U )
1890 /* Check if a core is free. */
1891 for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
1893 if( pxCurrentTCBs[ xCoreID ] == NULL )
1895 pxNewTCB->xTaskRunState = xCoreID;
1896 pxCurrentTCBs[ xCoreID ] = pxNewTCB;
1901 mtCOVERAGE_TEST_MARKER();
1907 mtCOVERAGE_TEST_MARKER();
1913 #if ( configUSE_TRACE_FACILITY == 1 )
1915 /* Add a counter into the TCB for tracing only. */
1916 pxNewTCB->uxTCBNumber = uxTaskNumber;
1918 #endif /* configUSE_TRACE_FACILITY */
1919 traceTASK_CREATE( pxNewTCB );
1921 prvAddTaskToReadyList( pxNewTCB );
1923 portSETUP_TCB( pxNewTCB );
1925 if( xSchedulerRunning != pdFALSE )
/* If the created task is of a higher priority than another
 * currently running task and preemption is on then it should
 * run now. */
1930 taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxNewTCB );
1934 mtCOVERAGE_TEST_MARKER();
1937 taskEXIT_CRITICAL();
1940 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
1941 /*-----------------------------------------------------------*/
1943 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
1945 static size_t prvSnprintfReturnValueToCharsWritten( int iSnprintfReturnValue,
1948 size_t uxCharsWritten;
1950 if( iSnprintfReturnValue < 0 )
/* Encoding error - Return 0 to indicate that nothing
 * was written to the buffer. */
uxCharsWritten = 0;
1956 else if( iSnprintfReturnValue >= ( int ) n )
/* This is the case when the supplied buffer is not
 * large enough to hold the generated string. Return the
 * number of characters actually written without
 * counting the terminating NULL character. */
1962 uxCharsWritten = n - 1;
1966 /* Complete string was written to the buffer. */
1967 uxCharsWritten = ( size_t ) iSnprintfReturnValue;
1970 return uxCharsWritten;
1973 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
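/* Illustration of the mapping performed by
 * prvSnprintfReturnValueToCharsWritten() (a sketch with hypothetical
 * values; cBuffer is an assumed name):
 *
 * char cBuffer[ 8 ];
 * int iResult = snprintf( cBuffer, sizeof( cBuffer ), "%s", "LongTaskName" );
 *
 * // snprintf() returns 12 (the length the full string needed), but only
 * // sizeof( cBuffer ) - 1 = 7 characters plus the null terminator were
 * // stored, so the helper reports 7. A negative return would report 0.
 * size_t uxWritten = prvSnprintfReturnValueToCharsWritten( iResult, sizeof( cBuffer ) );
 */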
1974 /*-----------------------------------------------------------*/
1976 #if ( INCLUDE_vTaskDelete == 1 )
1978 void vTaskDelete( TaskHandle_t xTaskToDelete )
1982 traceENTER_vTaskDelete( xTaskToDelete );
1984 taskENTER_CRITICAL();
/* If null is passed in here then it is the calling task that is
 * being deleted. */
1988 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1990 /* Remove task from the ready/delayed list. */
1991 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1993 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1997 mtCOVERAGE_TEST_MARKER();
2000 /* Is the task waiting on an event also? */
2001 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2003 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2007 mtCOVERAGE_TEST_MARKER();
/* Increment the uxTaskNumber also so kernel aware debuggers can
 * detect that the task lists need re-generating. This is done before
 * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
 * not return. */
uxTaskNumber++;
2016 /* If the task is running (or yielding), we must add it to the
2017 * termination list so that an idle task can delete it when it is
2018 * no longer running. */
2019 if( taskTASK_IS_RUNNING_OR_SCHEDULED_TO_YIELD( pxTCB ) != pdFALSE )
2021 /* A running task or a task which is scheduled to yield is being
2022 * deleted. This cannot complete when the task is still running
2023 * on a core, as a context switch to another task is required.
2024 * Place the task in the termination list. The idle task will check
2025 * the termination list and free up any memory allocated by the
2026 * scheduler for the TCB and stack of the deleted task. */
2027 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
2029 /* Increment the ucTasksDeleted variable so the idle task knows
2030 * there is a task that has been deleted and that it should therefore
2031 * check the xTasksWaitingTermination list. */
2032 ++uxDeletedTasksWaitingCleanUp;
2034 /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
2035 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
2036 traceTASK_DELETE( pxTCB );
/* The pre-delete hook is primarily for the Windows simulator,
 * in which Windows specific clean up operations are performed,
 * after which it is not possible to yield away from this task -
 * hence xYieldPending is used to latch that a context switch is
 * required. */
2043 #if ( configNUMBER_OF_CORES == 1 )
2044 portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ 0 ] ) );
2046 portPRE_TASK_DELETE_HOOK( pxTCB, &( xYieldPendings[ pxTCB->xTaskRunState ] ) );
2051 --uxCurrentNumberOfTasks;
2052 traceTASK_DELETE( pxTCB );
2054 /* Reset the next expected unblock time in case it referred to
2055 * the task that has just been deleted. */
2056 prvResetNextTaskUnblockTime();
2060 #if ( configNUMBER_OF_CORES == 1 )
2062 taskEXIT_CRITICAL();
2064 /* If the task is not deleting itself, call prvDeleteTCB from outside of
2065 * critical section. If a task deletes itself, prvDeleteTCB is called
2066 * from prvCheckTasksWaitingTermination which is called from Idle task. */
2067 if( pxTCB != pxCurrentTCB )
2069 prvDeleteTCB( pxTCB );
/* Force a reschedule if it is the currently running task that has just
 * been deleted. */
2074 if( xSchedulerRunning != pdFALSE )
2076 if( pxTCB == pxCurrentTCB )
2078 configASSERT( uxSchedulerSuspended == 0 );
2079 portYIELD_WITHIN_API();
2083 mtCOVERAGE_TEST_MARKER();
2087 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2089 /* If a running task is not deleting itself, call prvDeleteTCB. If a running
2090 * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination
2091 * which is called from Idle task. */
2092 if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
2094 prvDeleteTCB( pxTCB );
2097 /* Force a reschedule if the task that has just been deleted was running. */
2098 if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
2100 if( pxTCB->xTaskRunState == ( BaseType_t ) portGET_CORE_ID() )
2102 configASSERT( uxSchedulerSuspended == 0 );
2103 vTaskYieldWithinAPI();
2107 prvYieldCore( pxTCB->xTaskRunState );
2111 taskEXIT_CRITICAL();
2113 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2115 traceRETURN_vTaskDelete();
2118 #endif /* INCLUDE_vTaskDelete */
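/* Example usage of vTaskDelete() (an illustrative sketch; names are
 * hypothetical):
 *
 * void vOtherFunction( void )
 * {
 *     TaskHandle_t xHandle = NULL;
 *
 *     // Create the task, storing the handle.
 *     xTaskCreate( vTaskCode, "NAME", configMINIMAL_STACK_SIZE, NULL,
 *                  tskIDLE_PRIORITY, &xHandle );
 *
 *     // Use the handle to delete the task. Passing NULL instead would
 *     // delete the calling task.
 *     vTaskDelete( xHandle );
 * }
 */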
2119 /*-----------------------------------------------------------*/
2121 #if ( INCLUDE_xTaskDelayUntil == 1 )
2123 BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
2124 const TickType_t xTimeIncrement )
2126 TickType_t xTimeToWake;
2127 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
2129 traceENTER_xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );
2131 configASSERT( pxPreviousWakeTime );
2132 configASSERT( ( xTimeIncrement > 0U ) );
/* Minor optimisation. The tick count cannot change in this
 * block. */
2138 const TickType_t xConstTickCount = xTickCount;
2140 configASSERT( uxSchedulerSuspended == 1U );
2142 /* Generate the tick time at which the task wants to wake. */
2143 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
2145 if( xConstTickCount < *pxPreviousWakeTime )
/* The tick count has overflowed since this function was
 * last called. In this case the only time we should ever
 * actually delay is if the wake time has also overflowed,
 * and the wake time is greater than the tick time. When this
 * is the case it is as if neither time had overflowed. */
2152 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
2154 xShouldDelay = pdTRUE;
2158 mtCOVERAGE_TEST_MARKER();
2163 /* The tick time has not overflowed. In this case we will
2164 * delay if either the wake time has overflowed, and/or the
2165 * tick time is less than the wake time. */
2166 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
2168 xShouldDelay = pdTRUE;
2172 mtCOVERAGE_TEST_MARKER();
2176 /* Update the wake time ready for the next call. */
2177 *pxPreviousWakeTime = xTimeToWake;
2179 if( xShouldDelay != pdFALSE )
2181 traceTASK_DELAY_UNTIL( xTimeToWake );
2183 /* prvAddCurrentTaskToDelayedList() needs the block time, not
2184 * the time to wake, so subtract the current tick count. */
2185 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
2189 mtCOVERAGE_TEST_MARKER();
2192 xAlreadyYielded = xTaskResumeAll();
/* Force a reschedule if xTaskResumeAll has not already done so; we may
 * have put ourselves to sleep. */
2196 if( xAlreadyYielded == pdFALSE )
2198 taskYIELD_WITHIN_API();
2202 mtCOVERAGE_TEST_MARKER();
2205 traceRETURN_xTaskDelayUntil( xShouldDelay );
2207 return xShouldDelay;
2210 #endif /* INCLUDE_xTaskDelayUntil */
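/* Example usage of xTaskDelayUntil() to perform an action every 10
 * milliseconds without drift (an illustrative sketch):
 *
 * void vTaskFunction( void * pvParameters )
 * {
 *     TickType_t xLastWakeTime;
 *     const TickType_t xFrequency = pdMS_TO_TICKS( 10 );
 *
 *     // Initialise xLastWakeTime with the current time.
 *     xLastWakeTime = xTaskGetTickCount();
 *
 *     for( ;; )
 *     {
 *         // Wait for the next cycle. The wake time is advanced by a
 *         // fixed increment from the previous wake time, not from "now",
 *         // so the period does not accumulate error.
 *         xTaskDelayUntil( &xLastWakeTime, xFrequency );
 *
 *         // Perform the periodic work here.
 *     }
 * }
 */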
2211 /*-----------------------------------------------------------*/
2213 #if ( INCLUDE_vTaskDelay == 1 )
2215 void vTaskDelay( const TickType_t xTicksToDelay )
2217 BaseType_t xAlreadyYielded = pdFALSE;
2219 traceENTER_vTaskDelay( xTicksToDelay );
2221 /* A delay time of zero just forces a reschedule. */
2222 if( xTicksToDelay > ( TickType_t ) 0U )
2226 configASSERT( uxSchedulerSuspended == 1U );
/* A task that is removed from the event list while the
 * scheduler is suspended will not get placed in the ready
 * list or removed from the blocked list until the scheduler
 * is resumed.
 *
 * This task cannot be in an event list as it is the currently
 * executing task. */
2237 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
2239 xAlreadyYielded = xTaskResumeAll();
2243 mtCOVERAGE_TEST_MARKER();
/* Force a reschedule if xTaskResumeAll has not already done so; we may
 * have put ourselves to sleep. */
2248 if( xAlreadyYielded == pdFALSE )
2250 taskYIELD_WITHIN_API();
2254 mtCOVERAGE_TEST_MARKER();
2257 traceRETURN_vTaskDelay();
2260 #endif /* INCLUDE_vTaskDelay */
2261 /*-----------------------------------------------------------*/
2263 #if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
2265 eTaskState eTaskGetState( TaskHandle_t xTask )
2268 List_t const * pxStateList;
2269 List_t const * pxEventList;
2270 List_t const * pxDelayedList;
2271 List_t const * pxOverflowedDelayedList;
2272 const TCB_t * const pxTCB = xTask;
2274 traceENTER_eTaskGetState( xTask );
2276 configASSERT( pxTCB );
2278 #if ( configNUMBER_OF_CORES == 1 )
2279 if( pxTCB == pxCurrentTCB )
2281 /* The task calling this function is querying its own state. */
2287 taskENTER_CRITICAL();
2289 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
2290 pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
2291 pxDelayedList = pxDelayedTaskList;
2292 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
2294 taskEXIT_CRITICAL();
2296 if( pxEventList == &xPendingReadyList )
2298 /* The task has been placed on the pending ready list, so its
2299 * state is eReady regardless of what list the task's state list
* item is currently placed on. */
eReturn = eReady;
2303 else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
/* The task being queried is referenced from one of the Blocked
 * lists. */
eReturn = eBlocked;
2310 #if ( INCLUDE_vTaskSuspend == 1 )
2311 else if( pxStateList == &xSuspendedTaskList )
/* The task being queried is referenced from the suspended
 * list. Is it genuinely suspended or is it blocked
 * indefinitely? */
2316 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
2318 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
/* The task does not appear on the event list item of
 * any of the RTOS objects, but could still be in the
 * blocked state if it is waiting on its notification
 * rather than waiting on an object. If not, it is
 * suspended. */
eReturn = eSuspended;
2329 for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
2331 if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
2338 #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
2340 eReturn = eSuspended;
2342 #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
2349 #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */
2351 #if ( INCLUDE_vTaskDelete == 1 )
2352 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
/* The task being queried is referenced from the deleted
 * tasks list, or it is not referenced from any lists at
 * all. */
eReturn = eDeleted;
2361 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
2363 #if ( configNUMBER_OF_CORES == 1 )
2365 /* If the task is not in any other state, it must be in the
2366 * Ready (including pending ready) state. */
2369 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2371 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
2373 /* Is it actively running on a core? */
2378 /* If the task is not in any other state, it must be in the
2379 * Ready (including pending ready) state. */
2383 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2387 traceRETURN_eTaskGetState( eReturn );
2390 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
2392 #endif /* INCLUDE_eTaskGetState */
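/* Example usage of eTaskGetState() (an illustrative sketch; xHandle is
 * assumed to reference a previously created task):
 *
 * eTaskState eState = eTaskGetState( xHandle );
 *
 * if( eState == eSuspended )
 * {
 *     // The task is genuinely suspended rather than blocked, so it will
 *     // not run again until explicitly resumed.
 *     vTaskResume( xHandle );
 * }
 */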
2393 /*-----------------------------------------------------------*/
2395 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2397 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
2399 TCB_t const * pxTCB;
2400 UBaseType_t uxReturn;
2402 traceENTER_uxTaskPriorityGet( xTask );
2404 taskENTER_CRITICAL();
2406 /* If null is passed in here then it is the priority of the task
2407 * that called uxTaskPriorityGet() that is being queried. */
2408 pxTCB = prvGetTCBFromHandle( xTask );
2409 uxReturn = pxTCB->uxPriority;
2411 taskEXIT_CRITICAL();
2413 traceRETURN_uxTaskPriorityGet( uxReturn );
2418 #endif /* INCLUDE_uxTaskPriorityGet */
2419 /*-----------------------------------------------------------*/
2421 #if ( INCLUDE_uxTaskPriorityGet == 1 )
2423 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
2425 TCB_t const * pxTCB;
2426 UBaseType_t uxReturn;
2427 UBaseType_t uxSavedInterruptStatus;
2429 traceENTER_uxTaskPriorityGetFromISR( xTask );
2431 /* RTOS ports that support interrupt nesting have the concept of a
2432 * maximum system call (or maximum API call) interrupt priority.
* Interrupts that are above the maximum system call priority are kept
2434 * permanently enabled, even when the RTOS kernel is in a critical section,
2435 * but cannot make any calls to FreeRTOS API functions. If configASSERT()
2436 * is defined in FreeRTOSConfig.h then
2437 * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2438 * failure if a FreeRTOS API function is called from an interrupt that has
2439 * been assigned a priority above the configured maximum system call
2440 * priority. Only FreeRTOS functions that end in FromISR can be called
2441 * from interrupts that have been assigned a priority at or (logically)
2442 * below the maximum system call interrupt priority. FreeRTOS maintains a
2443 * separate interrupt safe API to ensure interrupt entry is as fast and as
2444 * simple as possible. More information (albeit Cortex-M specific) is
2445 * provided on the following link:
2446 * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2447 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2449 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2451 /* If null is passed in here then it is the priority of the calling
2452 * task that is being queried. */
2453 pxTCB = prvGetTCBFromHandle( xTask );
2454 uxReturn = pxTCB->uxPriority;
2456 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2458 traceRETURN_uxTaskPriorityGetFromISR( uxReturn );
2463 #endif /* INCLUDE_uxTaskPriorityGet */
2464 /*-----------------------------------------------------------*/
2466 #if ( INCLUDE_vTaskPrioritySet == 1 )
2468 void vTaskPrioritySet( TaskHandle_t xTask,
2469 UBaseType_t uxNewPriority )
2472 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
2473 BaseType_t xYieldRequired = pdFALSE;
2475 traceENTER_vTaskPrioritySet( xTask, uxNewPriority );
2477 #if ( configNUMBER_OF_CORES > 1 )
2478 BaseType_t xYieldForTask = pdFALSE;
2481 configASSERT( uxNewPriority < configMAX_PRIORITIES );
2483 /* Ensure the new priority is valid. */
2484 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
2486 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
2490 mtCOVERAGE_TEST_MARKER();
2493 taskENTER_CRITICAL();
2495 /* If null is passed in here then it is the priority of the calling
2496 * task that is being changed. */
2497 pxTCB = prvGetTCBFromHandle( xTask );
2499 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
2501 #if ( configUSE_MUTEXES == 1 )
2503 uxCurrentBasePriority = pxTCB->uxBasePriority;
2507 uxCurrentBasePriority = pxTCB->uxPriority;
2511 if( uxCurrentBasePriority != uxNewPriority )
2513 /* The priority change may have readied a task of higher
2514 * priority than a running task. */
2515 if( uxNewPriority > uxCurrentBasePriority )
2517 #if ( configNUMBER_OF_CORES == 1 )
2519 if( pxTCB != pxCurrentTCB )
2521 /* The priority of a task other than the currently
2522 * running task is being raised. Is the priority being
2523 * raised above that of the running task? */
2524 if( uxNewPriority > pxCurrentTCB->uxPriority )
2526 xYieldRequired = pdTRUE;
2530 mtCOVERAGE_TEST_MARKER();
2535 /* The priority of the running task is being raised,
2536 * but the running task must already be the highest
2537 * priority task able to run so no yield is required. */
2540 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2542 /* The priority of a task is being raised so
2543 * perform a yield for this task later. */
2544 xYieldForTask = pdTRUE;
2546 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2548 else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
2550 /* Setting the priority of a running task down means
2551 * there may now be another task of higher priority that
2552 * is ready to execute. */
2553 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
2554 if( pxTCB->xPreemptionDisable == pdFALSE )
2557 xYieldRequired = pdTRUE;
2562 /* Setting the priority of any other task down does not
2563 * require a yield as the running task must be above the
2564 * new priority of the task being modified. */
2567 /* Remember the ready list the task might be referenced from
2568 * before its uxPriority member is changed so the
2569 * taskRESET_READY_PRIORITY() macro can function correctly. */
2570 uxPriorityUsedOnEntry = pxTCB->uxPriority;
2572 #if ( configUSE_MUTEXES == 1 )
2574 /* Only change the priority being used if the task is not
2575 * currently using an inherited priority or the new priority
2576 * is bigger than the inherited priority. */
2577 if( ( pxTCB->uxBasePriority == pxTCB->uxPriority ) || ( uxNewPriority > pxTCB->uxPriority ) )
2579 pxTCB->uxPriority = uxNewPriority;
2583 mtCOVERAGE_TEST_MARKER();
/* The base priority is set regardless. */
2587 pxTCB->uxBasePriority = uxNewPriority;
2589 #else /* if ( configUSE_MUTEXES == 1 ) */
2591 pxTCB->uxPriority = uxNewPriority;
2593 #endif /* if ( configUSE_MUTEXES == 1 ) */
2595 /* Only reset the event list item value if the value is not
2596 * being used for anything else. */
2597 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
2599 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2603 mtCOVERAGE_TEST_MARKER();
2606 /* If the task is in the blocked or suspended list we need do
2607 * nothing more than change its priority variable. However, if
2608 * the task is in a ready list it needs to be removed and placed
2609 * in the list appropriate to its new priority. */
2610 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
2612 /* The task is currently in its ready list - remove before
2613 * adding it to its new ready list. As we are in a critical
2614 * section we can do this even if the scheduler is suspended. */
2615 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
2617 /* It is known that the task is in its ready list so
2618 * there is no need to check again and the port level
2619 * reset macro can be called directly. */
2620 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
2624 mtCOVERAGE_TEST_MARKER();
2627 prvAddTaskToReadyList( pxTCB );
2631 #if ( configNUMBER_OF_CORES == 1 )
2633 mtCOVERAGE_TEST_MARKER();
2637 /* It's possible that xYieldForTask was already set to pdTRUE because
2638 * its priority is being raised. However, since it is not in a ready list
2639 * we don't actually need to yield for it. */
2640 xYieldForTask = pdFALSE;
2645 if( xYieldRequired != pdFALSE )
2647 /* The running task priority is set down. Request the task to yield. */
2648 taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxTCB );
2652 #if ( configNUMBER_OF_CORES > 1 )
2653 if( xYieldForTask != pdFALSE )
/* The priority of the task is being raised. If a running
 * task has priority lower than this task, it should yield
 * to this task. */
2658 taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
2661 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
2663 mtCOVERAGE_TEST_MARKER();
2667 /* Remove compiler warning about unused variables when the port
2668 * optimised task selection is not being used. */
2669 ( void ) uxPriorityUsedOnEntry;
2672 taskEXIT_CRITICAL();
2674 traceRETURN_vTaskPrioritySet();
2677 #endif /* INCLUDE_vTaskPrioritySet */
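/* Example usage of vTaskPrioritySet() together with uxTaskPriorityGet()
 * (an illustrative sketch; xHandle is assumed to reference a previously
 * created task):
 *
 * // Save the task's current priority, then raise it by one.
 * UBaseType_t uxOldPriority = uxTaskPriorityGet( xHandle );
 * vTaskPrioritySet( xHandle, uxOldPriority + 1 );
 *
 * // ... time critical processing ...
 *
 * // Restore the original priority.
 * vTaskPrioritySet( xHandle, uxOldPriority );
 */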
2678 /*-----------------------------------------------------------*/
2680 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
2681 void vTaskCoreAffinitySet( const TaskHandle_t xTask,
2682 UBaseType_t uxCoreAffinityMask )
2686 UBaseType_t uxPrevCoreAffinityMask;
2688 #if ( configUSE_PREEMPTION == 1 )
2689 UBaseType_t uxPrevNotAllowedCores;
2692 traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask );
2694 taskENTER_CRITICAL();
2696 pxTCB = prvGetTCBFromHandle( xTask );
2698 uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask;
2699 pxTCB->uxCoreAffinityMask = uxCoreAffinityMask;
2701 if( xSchedulerRunning != pdFALSE )
2703 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
2705 xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
/* If the task can no longer run on the core it was running on,
 * request the core to yield. */
2709 if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U )
2711 prvYieldCore( xCoreID );
2716 #if ( configUSE_PREEMPTION == 1 )
2718 /* Calculate the cores on which this task was not allowed to
2719 * run previously. */
2720 uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U );
/* Does the new core mask enable this task to run on any of the
 * previously not allowed cores? If yes, check if this task can be
 * scheduled on any of those cores. */
2725 if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U )
2727 prvYieldForTask( pxTCB );
2730 #else /* #if( configUSE_PREEMPTION == 1 ) */
2732 mtCOVERAGE_TEST_MARKER();
2734 #endif /* #if( configUSE_PREEMPTION == 1 ) */
2738 taskEXIT_CRITICAL();
2740 traceRETURN_vTaskCoreAffinitySet();
2742 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
2743 /*-----------------------------------------------------------*/
2745 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
2746 UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask )
2748 const TCB_t * pxTCB;
2749 UBaseType_t uxCoreAffinityMask;
2751 traceENTER_vTaskCoreAffinityGet( xTask );
2753 taskENTER_CRITICAL();
2755 pxTCB = prvGetTCBFromHandle( xTask );
2756 uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
2758 taskEXIT_CRITICAL();
2760 traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask );
2762 return uxCoreAffinityMask;
2764 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */
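/* Example usage of the core affinity API in an SMP build (an illustrative
 * sketch; xHandle is assumed to reference a previously created task):
 *
 * // Pin the task to core 0. Bit n of the mask being set means the task
 * // may run on core n.
 * vTaskCoreAffinitySet( xHandle, ( UBaseType_t ) ( 1U << 0 ) );
 *
 * // Read the mask back, then allow the task to run on any core again.
 * UBaseType_t uxMask = vTaskCoreAffinityGet( xHandle );
 * ( void ) uxMask;
 * vTaskCoreAffinitySet( xHandle, tskNO_AFFINITY );
 */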
2766 /*-----------------------------------------------------------*/
2768 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
2770 void vTaskPreemptionDisable( const TaskHandle_t xTask )
2774 traceENTER_vTaskPreemptionDisable( xTask );
2776 taskENTER_CRITICAL();
2778 pxTCB = prvGetTCBFromHandle( xTask );
2780 pxTCB->xPreemptionDisable = pdTRUE;
2782 taskEXIT_CRITICAL();
2784 traceRETURN_vTaskPreemptionDisable();
2787 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
2788 /*-----------------------------------------------------------*/
2790 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
2792 void vTaskPreemptionEnable( const TaskHandle_t xTask )
2797 traceENTER_vTaskPreemptionEnable( xTask );
2799 taskENTER_CRITICAL();
2801 pxTCB = prvGetTCBFromHandle( xTask );
2803 pxTCB->xPreemptionDisable = pdFALSE;
2805 if( xSchedulerRunning != pdFALSE )
2807 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
2809 xCoreID = ( BaseType_t ) pxTCB->xTaskRunState;
2810 prvYieldCore( xCoreID );
2814 taskEXIT_CRITICAL();
2816 traceRETURN_vTaskPreemptionEnable();
2819 #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
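/* Example usage of the per-task preemption control (an illustrative
 * sketch; requires configUSE_TASK_PREEMPTION_DISABLE to be 1):
 *
 * // Prevent the calling task from being preempted by other tasks while
 * // it performs a short sequence. Unlike a critical section, interrupts
 * // remain enabled throughout.
 * vTaskPreemptionDisable( NULL );
 * {
 *     // ... short sequence that must not be time-sliced away ...
 * }
 * vTaskPreemptionEnable( NULL );
 */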
2820 /*-----------------------------------------------------------*/
2822 #if ( INCLUDE_vTaskSuspend == 1 )
2824 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
2828 #if ( configNUMBER_OF_CORES > 1 )
2829 BaseType_t xTaskRunningOnCore;
2832 traceENTER_vTaskSuspend( xTaskToSuspend );
2834 taskENTER_CRITICAL();
2836 /* If null is passed in here then it is the running task that is
2837 * being suspended. */
2838 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
2840 traceTASK_SUSPEND( pxTCB );
2842 #if ( configNUMBER_OF_CORES > 1 )
2843 xTaskRunningOnCore = pxTCB->xTaskRunState;
2846 /* Remove task from the ready/delayed list and place in the
2847 * suspended list. */
2848 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
2850 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
2854 mtCOVERAGE_TEST_MARKER();
2857 /* Is the task waiting on an event also? */
2858 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2860 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2864 mtCOVERAGE_TEST_MARKER();
2867 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
2869 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
2873 for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
2875 if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
2877 /* The task was blocked to wait for a notification, but is
2878 * now suspended, so no notification was received. */
2879 pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
2883 #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
2886 #if ( configNUMBER_OF_CORES == 1 )
2888 taskEXIT_CRITICAL();
2890 if( xSchedulerRunning != pdFALSE )
2892 /* Reset the next expected unblock time in case it referred to the
2893 * task that is now in the Suspended state. */
2894 taskENTER_CRITICAL();
2896 prvResetNextTaskUnblockTime();
2898 taskEXIT_CRITICAL();
2902 mtCOVERAGE_TEST_MARKER();
2905 if( pxTCB == pxCurrentTCB )
2907 if( xSchedulerRunning != pdFALSE )
2909 /* The current task has just been suspended. */
2910 configASSERT( uxSchedulerSuspended == 0 );
2911 portYIELD_WITHIN_API();
2915 /* The scheduler is not running, but the task that was pointed
2916 * to by pxCurrentTCB has just been suspended and pxCurrentTCB
2917 * must be adjusted to point to a different task. */
2918 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
/* No other tasks are ready, so set pxCurrentTCB back to
 * NULL so when the next task is created pxCurrentTCB will
 * be set to point to it no matter what its relative priority
 * is. */
pxCurrentTCB = NULL;
2928 vTaskSwitchContext();
2934 mtCOVERAGE_TEST_MARKER();
2937 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2939 if( xSchedulerRunning != pdFALSE )
2941 /* Reset the next expected unblock time in case it referred to the
2942 * task that is now in the Suspended state. */
2943 prvResetNextTaskUnblockTime();
2947 mtCOVERAGE_TEST_MARKER();
2950 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
2952 if( xSchedulerRunning != pdFALSE )
2954 if( xTaskRunningOnCore == ( BaseType_t ) portGET_CORE_ID() )
2956 /* The current task has just been suspended. */
2957 configASSERT( uxSchedulerSuspended == 0 );
2958 vTaskYieldWithinAPI();
2962 prvYieldCore( xTaskRunningOnCore );
/* This code path is not possible because only Idle tasks are
 * assigned a core before the scheduler is started ( i.e.
 * taskTASK_IS_RUNNING is only true for idle tasks before
 * the scheduler is started ) and idle tasks cannot be
 * suspended. */
2972 mtCOVERAGE_TEST_MARKER();
2977 mtCOVERAGE_TEST_MARKER();
2980 taskEXIT_CRITICAL();
2982 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2984 traceRETURN_vTaskSuspend();
2987 #endif /* INCLUDE_vTaskSuspend */
2988 /*-----------------------------------------------------------*/
2990 #if ( INCLUDE_vTaskSuspend == 1 )
2992 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
2994 BaseType_t xReturn = pdFALSE;
2995 const TCB_t * const pxTCB = xTask;
/* Accesses xPendingReadyList so must be called from a critical
 * section. */
3000 /* It does not make sense to check if the calling task is suspended. */
3001 configASSERT( xTask );
3003 /* Is the task being resumed actually in the suspended list? */
3004 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
3006 /* Has the task already been resumed from within an ISR? */
3007 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
/* Is it in the suspended list because it is in the Suspended
 * state, or because it is blocked with no timeout? */
3011 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
3017 mtCOVERAGE_TEST_MARKER();
3022 mtCOVERAGE_TEST_MARKER();
3027 mtCOVERAGE_TEST_MARKER();
3031 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
3033 #endif /* INCLUDE_vTaskSuspend */
3034 /*-----------------------------------------------------------*/
3036 #if ( INCLUDE_vTaskSuspend == 1 )
3038 void vTaskResume( TaskHandle_t xTaskToResume )
3040 TCB_t * const pxTCB = xTaskToResume;
3042 traceENTER_vTaskResume( xTaskToResume );
3044 /* It does not make sense to resume the calling task. */
3045 configASSERT( xTaskToResume );
3047 #if ( configNUMBER_OF_CORES == 1 )
3049 /* The parameter cannot be NULL as it is impossible to resume the
3050 * currently executing task. */
3051 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
/* The parameter cannot be NULL as it is impossible to resume the
 * currently executing task. It is also impossible to resume a task
 * that is actively running on another core but it is not safe
 * to check its run state here. Therefore, we get into a critical
 * section and check if the task is actually suspended or not. */
3062 taskENTER_CRITICAL();
3064 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
3066 traceTASK_RESUME( pxTCB );
3068 /* The ready list can be accessed even if the scheduler is
3069 * suspended because this is inside a critical section. */
3070 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3071 prvAddTaskToReadyList( pxTCB );
/* This yield may not cause the task just resumed to run,
 * but will leave the lists in the correct state for the
 * next yield. */
3076 taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
3080 mtCOVERAGE_TEST_MARKER();
3083 taskEXIT_CRITICAL();
3087 mtCOVERAGE_TEST_MARKER();
3090 traceRETURN_vTaskResume();
3093 #endif /* INCLUDE_vTaskSuspend */
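/* Example usage of vTaskSuspend() and vTaskResume() (an illustrative
 * sketch; xHandle is assumed to reference a previously created task):
 *
 * // Suspend the task. It will not run again until resumed, and calls to
 * // vTaskSuspend() do not nest.
 * vTaskSuspend( xHandle );
 *
 * // A single call makes the task available to the scheduler again.
 * vTaskResume( xHandle );
 *
 * // A task can also suspend itself by passing NULL.
 * vTaskSuspend( NULL );
 */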
3095 /*-----------------------------------------------------------*/
3097 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
3099 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
3101 BaseType_t xYieldRequired = pdFALSE;
3102 TCB_t * const pxTCB = xTaskToResume;
3103 UBaseType_t uxSavedInterruptStatus;
3105 traceENTER_xTaskResumeFromISR( xTaskToResume );
3107 configASSERT( xTaskToResume );
3109 /* RTOS ports that support interrupt nesting have the concept of a
3110 * maximum system call (or maximum API call) interrupt priority.
* Interrupts that are above the maximum system call priority are kept
3112 * permanently enabled, even when the RTOS kernel is in a critical section,
3113 * but cannot make any calls to FreeRTOS API functions. If configASSERT()
3114 * is defined in FreeRTOSConfig.h then
3115 * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
3116 * failure if a FreeRTOS API function is called from an interrupt that has
3117 * been assigned a priority above the configured maximum system call
3118 * priority. Only FreeRTOS functions that end in FromISR can be called
3119 * from interrupts that have been assigned a priority at or (logically)
3120 * below the maximum system call interrupt priority. FreeRTOS maintains a
3121 * separate interrupt safe API to ensure interrupt entry is as fast and as
3122 * simple as possible. More information (albeit Cortex-M specific) is
3123 * provided on the following link:
3124 * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
3125 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
3127 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
3129 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
3131 traceTASK_RESUME_FROM_ISR( pxTCB );
3133 /* Check the ready lists can be accessed. */
3134 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
3136 #if ( configNUMBER_OF_CORES == 1 )
3138 /* Ready lists can be accessed so move the task from the
3139 * suspended list to the ready list directly. */
3140 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
3142 xYieldRequired = pdTRUE;
3144 /* Mark that a yield is pending in case the user is not
3145 * using the return value to initiate a context switch
3146 * from the ISR using portYIELD_FROM_ISR. */
3147 xYieldPendings[ 0 ] = pdTRUE;
3151 mtCOVERAGE_TEST_MARKER();
3154 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
3156 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3157 prvAddTaskToReadyList( pxTCB );
/* The delayed or ready lists cannot be accessed so the task
 * is held in the pending ready list until the scheduler is
 * resumed. */
3164 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
3167 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) )
3169 prvYieldForTask( pxTCB );
3171 if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
3173 xYieldRequired = pdTRUE;
3176 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */
3180 mtCOVERAGE_TEST_MARKER();
3183 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
3185 traceRETURN_xTaskResumeFromISR( xYieldRequired );
3187 return xYieldRequired;
3190 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
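/* Example usage of xTaskResumeFromISR() (an illustrative sketch; the ISR
 * name and xHandlerTaskHandle are hypothetical):
 *
 * void vAnExampleISR( void )
 * {
 *     BaseType_t xYieldRequired;
 *
 *     // Resume the suspended handler task.
 *     xYieldRequired = xTaskResumeFromISR( xHandlerTaskHandle );
 *
 *     // Request a context switch on exit from the ISR if the resumed
 *     // task should run before the interrupted task.
 *     portYIELD_FROM_ISR( xYieldRequired );
 * }
 */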
3191 /*-----------------------------------------------------------*/
3193 static BaseType_t prvCreateIdleTasks( void )
3195 BaseType_t xReturn = pdPASS;
3197 #if ( configNUMBER_OF_CORES == 1 )
3199 /* Add the idle task at the lowest priority. */
3200 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
3202 StaticTask_t * pxIdleTaskTCBBuffer = NULL;
3203 StackType_t * pxIdleTaskStackBuffer = NULL;
3204 uint32_t ulIdleTaskStackSize;
3206 /* The Idle task is created using user provided RAM - obtain the
3207 * address of the RAM then create the idle task. */
3208 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
3209 xIdleTaskHandles[ 0 ] = xTaskCreateStatic( prvIdleTask,
3210 configIDLE_TASK_NAME,
3211 ulIdleTaskStackSize,
3212 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
3213 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3214 pxIdleTaskStackBuffer,
3215 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3217 if( xIdleTaskHandles[ 0 ] != NULL )
3226 #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
3228 /* The Idle task is being created using dynamically allocated RAM. */
3229 xReturn = xTaskCreate( prvIdleTask,
3230 configIDLE_TASK_NAME,
3231 configMINIMAL_STACK_SIZE,
3233 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3234 &xIdleTaskHandles[ 0 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3236 #endif /* configSUPPORT_STATIC_ALLOCATION */
3238 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
3241 char cIdleName[ configMAX_TASK_NAME_LEN ];
3243 /* Add each idle task at the lowest priority. */
3244 for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
3248 if( xReturn == pdFAIL )
3254 mtCOVERAGE_TEST_MARKER();
3257 for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ )
3259 cIdleName[ x ] = configIDLE_TASK_NAME[ x ];
3261 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
3262 * configMAX_TASK_NAME_LEN characters just in case the memory after the
3263 * string is not accessible (extremely unlikely). */
3264 if( cIdleName[ x ] == ( char ) 0x00 )
3270 mtCOVERAGE_TEST_MARKER();
3274 /* Append the idle task number to the end of the name if there is space. */
3275 if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN )
3277 cIdleName[ x ] = ( char ) ( xCoreID + '0' );
3280 /* And append a null character if there is space. */
3281 if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN )
3283 cIdleName[ x ] = '\0';
3287 mtCOVERAGE_TEST_MARKER();
3292 mtCOVERAGE_TEST_MARKER();
3295 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
3299 StaticTask_t * pxIdleTaskTCBBuffer = NULL;
3300 StackType_t * pxIdleTaskStackBuffer = NULL;
3301 uint32_t ulIdleTaskStackSize;
3303 /* The Idle task is created using user provided RAM - obtain the
3304 * address of the RAM then create the idle task. */
3305 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
3306 xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask,
3308 ulIdleTaskStackSize,
3309 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
3310 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3311 pxIdleTaskStackBuffer,
3312 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3316 xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask,
3318 configMINIMAL_STACK_SIZE,
3319 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
3320 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3321 xIdleTaskStackBuffers[ xCoreID - 1 ],
3322 &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3325 if( xIdleTaskHandles[ xCoreID ] != NULL )
3334 #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
3338 /* The Idle task is being created using dynamically allocated RAM. */
3339 xReturn = xTaskCreate( prvIdleTask,
3341 configMINIMAL_STACK_SIZE,
3343 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3344 &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3348 xReturn = xTaskCreate( prvMinimalIdleTask,
3350 configMINIMAL_STACK_SIZE,
3352 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
3353 &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
3356 #endif /* configSUPPORT_STATIC_ALLOCATION */
3359 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
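/* When configSUPPORT_STATIC_ALLOCATION is 1 the application must supply
 * the memory used by the idle task. A minimal sketch of the required
 * callback (the static buffer names are illustrative):
 *
 * void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
 *                                     StackType_t ** ppxIdleTaskStackBuffer,
 *                                     uint32_t * pulIdleTaskStackSize )
 * {
 *     static StaticTask_t xIdleTaskTCB;
 *     static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];
 *
 *     *ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
 *     *ppxIdleTaskStackBuffer = uxIdleTaskStack;
 *     *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
 * }
 */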
3364 /*-----------------------------------------------------------*/
3366 void vTaskStartScheduler( void )
3370 traceENTER_vTaskStartScheduler();
3372 #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 )
/* Sanity check that UBaseType_t has at least as many bits as
 * configNUMBER_OF_CORES, as the core affinity mask is stored in a
 * UBaseType_t. */
3376 configASSERT( ( sizeof( UBaseType_t ) * taskBITS_PER_BYTE ) >= configNUMBER_OF_CORES );
3378 #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
3380 xReturn = prvCreateIdleTasks();
3382 #if ( configUSE_TIMERS == 1 )
3384 if( xReturn == pdPASS )
3386 xReturn = xTimerCreateTimerTask();
3390 mtCOVERAGE_TEST_MARKER();
3393 #endif /* configUSE_TIMERS */
3395 if( xReturn == pdPASS )
3397 /* freertos_tasks_c_additions_init() should only be called if the user
3398 * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
3399 * the only macro called by the function. */
3400 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
3402 freertos_tasks_c_additions_init();
/* Interrupts are turned off here, to ensure a tick does not occur
 * before or during the call to xPortStartScheduler(). The stacks of
 * the created tasks contain a status word with interrupts switched on
 * so interrupts will automatically get re-enabled when the first task
 * starts to run. */
3411 portDISABLE_INTERRUPTS();
3413 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
3415 /* Switch C-Runtime's TLS Block to point to the TLS
3416 * block specific to the task that will run first. */
3417 configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
3421 xNextTaskUnblockTime = portMAX_DELAY;
3422 xSchedulerRunning = pdTRUE;
3423 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
3425 /* If configGENERATE_RUN_TIME_STATS is defined then the following
3426 * macro must be defined to configure the timer/counter used to generate
3427 * the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
3428 * is set to 0 and the following line fails to build then ensure you do not
3429 * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
3430 * FreeRTOSConfig.h file. */
3431 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
3433 traceTASK_SWITCHED_IN();
3435 /* Setting up the timer tick is hardware specific and thus in the
3436 * portable interface. */
3437 xPortStartScheduler();
3439 /* In most cases, xPortStartScheduler() will not return. If it
3440 * returns pdTRUE then there was not enough heap memory available
3441 * to create either the Idle or the Timer task. If it returned
3442 * pdFALSE, then the application called xTaskEndScheduler().
3443 * Most ports don't implement xTaskEndScheduler() as there is
3444 * nothing to return to. */
3448 /* This line will only be reached if the kernel could not be started,
3449 * because there was not enough FreeRTOS heap to create the idle task
3450 * or the timer task. */
3451 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
3454 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
3455 * meaning xIdleTaskHandles are not used anywhere else. */
3456 ( void ) xIdleTaskHandles;
3458 /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority
3459 * from getting optimized out as it is no longer used by the kernel. */
3460 ( void ) uxTopUsedPriority;
3462 traceRETURN_vTaskStartScheduler();
3464 /*-----------------------------------------------------------*/
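/* Typical usage of vTaskStartScheduler() above (an illustrative sketch;
 * the task creation parameters are hypothetical):
 *
 * void vAFunction( void )
 * {
 *     // Create at least one application task before starting the kernel.
 *     xTaskCreate( vTaskCode, "NAME", configMINIMAL_STACK_SIZE, NULL,
 *                  tskIDLE_PRIORITY + 1, NULL );
 *
 *     // Start the real time kernel. This call does not return unless
 *     // there was insufficient heap to create the idle or timer task.
 *     vTaskStartScheduler();
 *
 *     // Will not get here unless a task calls vTaskEndScheduler().
 * }
 */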
3466 void vTaskEndScheduler( void )
3468 traceENTER_vTaskEndScheduler();
3470 /* Stop the scheduler interrupts and call the portable scheduler end
3471 * routine so the original ISRs can be restored if necessary. The port
3472 * layer must ensure interrupts enable bit is left in the correct state. */
3473 portDISABLE_INTERRUPTS();
3474 xSchedulerRunning = pdFALSE;
3475 vPortEndScheduler();
3477 traceRETURN_vTaskEndScheduler();
3479 /*----------------------------------------------------------*/
3481 void vTaskSuspendAll( void )
3483 traceENTER_vTaskSuspendAll();
3485 #if ( configNUMBER_OF_CORES == 1 )
3487 /* A critical section is not required as the variable is of type
3488 * BaseType_t. Please read Richard Barry's reply in the following link to a
3489 * post in the FreeRTOS support forum before reporting this as a bug! -
3490 * https://goo.gl/wu4acr */
3492 /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
3493 * do not otherwise exhibit real time behaviour. */
3494 portSOFTWARE_BARRIER();
3496 /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
3497 * is used to allow calls to vTaskSuspendAll() to nest. */
3498 ++uxSchedulerSuspended;
3500 /* Enforces ordering for ports and optimised compilers that may otherwise place
3501 * the above increment elsewhere. */
3502 portMEMORY_BARRIER();
3504 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
3506 UBaseType_t ulState;
3508 /* This must only be called from within a task. */
3509 portASSERT_IF_IN_ISR();
3511 if( xSchedulerRunning != pdFALSE )
3513 /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
3514 * We must disable interrupts before we grab the locks in the event that this task is
3515 * interrupted and switches context before incrementing uxSchedulerSuspended.
3516 * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
3517 * uxSchedulerSuspended since that will prevent context switches. */
3518 ulState = portSET_INTERRUPT_MASK();
3520 /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
3521 * do not otherwise exhibit real time behaviour. */
3522 portSOFTWARE_BARRIER();
3524 portGET_TASK_LOCK();
3526 /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The
3527 * purpose is to prevent altering the variable when fromISR APIs are readying it. */
3529 if( uxSchedulerSuspended == 0U )
3531 if( portGET_CRITICAL_NESTING_COUNT() == 0U )
3533 prvCheckForRunStateChange();
3537 mtCOVERAGE_TEST_MARKER();
3542 mtCOVERAGE_TEST_MARKER();
3547 /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
3548 * is used to allow calls to vTaskSuspendAll() to nest. */
3549 ++uxSchedulerSuspended;
3550 portRELEASE_ISR_LOCK();
3552 portCLEAR_INTERRUPT_MASK( ulState );
3556 mtCOVERAGE_TEST_MARKER();
3559 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
3561 traceRETURN_vTaskSuspendAll();
3564 /*----------------------------------------------------------*/
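/* Example usage (an illustrative sketch, not part of the kernel): suspend the
 * scheduler around an operation that must not be preempted by another task
 * but is too long for a critical section - interrupts stay enabled
 * throughout. prvWalkSharedList() is a hypothetical application function.
 *
 * vTaskSuspendAll();
 * {
 *     prvWalkSharedList();
 * }
 * ( void ) xTaskResumeAll();
 */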
3566 #if ( configUSE_TICKLESS_IDLE != 0 )
3568 static TickType_t prvGetExpectedIdleTime( void )
3571 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
3573 /* uxHigherPriorityReadyTasks takes care of the case where
3574 * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
3575 * task that are in the Ready state, even though the idle task is running. */
3577 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
3579 if( uxTopReadyPriority > tskIDLE_PRIORITY )
3581 uxHigherPriorityReadyTasks = pdTRUE;
3586 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
3588 /* When port optimised task selection is used the uxTopReadyPriority
3589 * variable is used as a bit map. If bits other than the least
3590 * significant bit are set then there are tasks that have a priority
3591 * above the idle priority that are in the Ready state. This takes
3592 * care of the case where the co-operative scheduler is in use. */
3593 if( uxTopReadyPriority > uxLeastSignificantBit )
3595 uxHigherPriorityReadyTasks = pdTRUE;
3598 #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */
3600 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
3604 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
3606 /* There are other idle priority tasks in the ready state. If
3607 * time slicing is used then the very next tick interrupt must be processed. */
3611 else if( uxHigherPriorityReadyTasks != pdFALSE )
3613 /* There are tasks in the Ready state that have a priority above the
3614 * idle priority. This path can only be reached if
3615 * configUSE_PREEMPTION is 0. */
3620 xReturn = xNextTaskUnblockTime - xTickCount;
3626 #endif /* configUSE_TICKLESS_IDLE */
3627 /*----------------------------------------------------------*/
3629 BaseType_t xTaskResumeAll( void )
3631 TCB_t * pxTCB = NULL;
3632 BaseType_t xAlreadyYielded = pdFALSE;
3634 traceENTER_xTaskResumeAll();
3636 #if ( configNUMBER_OF_CORES > 1 )
3637 if( xSchedulerRunning != pdFALSE )
3640 /* It is possible that an ISR caused a task to be removed from an event
3641 * list while the scheduler was suspended. If this was the case then the
3642 * removed task will have been added to the xPendingReadyList. Once the
3643 * scheduler has been resumed it is safe to move all the pending ready
3644 * tasks from this list into their appropriate ready list. */
3645 taskENTER_CRITICAL();
3648 xCoreID = ( BaseType_t ) portGET_CORE_ID();
3650 /* If uxSchedulerSuspended is zero then this function does not match a
3651 * previous call to vTaskSuspendAll(). */
3652 configASSERT( uxSchedulerSuspended != 0U );
3654 --uxSchedulerSuspended;
3655 portRELEASE_TASK_LOCK();
3657 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
3659 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
3661 /* Move any readied tasks from the pending list into the
3662 * appropriate ready list. */
3663 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
3665 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3666 listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
3667 portMEMORY_BARRIER();
3668 listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
3669 prvAddTaskToReadyList( pxTCB );
3671 #if ( configNUMBER_OF_CORES == 1 )
3673 /* If the moved task has a priority higher than the current
3674 * task then a yield must be performed. */
3675 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
3677 xYieldPendings[ xCoreID ] = pdTRUE;
3681 mtCOVERAGE_TEST_MARKER();
3684 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
3686 /* All appropriate tasks yield at the moment a task is added to xPendingReadyList.
3687 * If the current core yielded then vTaskSwitchContext() has already been called
3688 * which sets xYieldPendings for the current core to pdTRUE. */
3690 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
3695 /* A task was unblocked while the scheduler was suspended,
3696 * which may have prevented the next unblock time from being
3697 * re-calculated, in which case re-calculate it now. Mainly
3698 * important for low power tickless implementations, where
3699 * this can prevent an unnecessary exit from low power sleep mode. */
3701 prvResetNextTaskUnblockTime();
3704 /* If any ticks occurred while the scheduler was suspended then
3705 * they should be processed now. This ensures the tick count does
3706 * not slip, and that any delayed tasks are resumed at the correct time.
3709 * It should be safe to call xTaskIncrementTick here from any core
3710 * since we are in a critical section and xTaskIncrementTick itself
3711 * protects itself within a critical section. Suspending the scheduler
3712 * from any core causes xTaskIncrementTick to increment uxPendedCounts. */
3714 TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
3716 if( xPendedCounts > ( TickType_t ) 0U )
3720 if( xTaskIncrementTick() != pdFALSE )
3722 /* Other cores are interrupted from
3723 * within xTaskIncrementTick(). */
3724 xYieldPendings[ xCoreID ] = pdTRUE;
3728 mtCOVERAGE_TEST_MARKER();
3732 } while( xPendedCounts > ( TickType_t ) 0U );
3738 mtCOVERAGE_TEST_MARKER();
3742 if( xYieldPendings[ xCoreID ] != pdFALSE )
3744 #if ( configUSE_PREEMPTION != 0 )
3746 xAlreadyYielded = pdTRUE;
3748 #endif /* #if ( configUSE_PREEMPTION != 0 ) */
3750 #if ( configNUMBER_OF_CORES == 1 )
3752 taskYIELD_TASK_CORE_IF_USING_PREEMPTION( pxCurrentTCB );
3754 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
3758 mtCOVERAGE_TEST_MARKER();
3764 mtCOVERAGE_TEST_MARKER();
3767 taskEXIT_CRITICAL();
3770 traceRETURN_xTaskResumeAll( xAlreadyYielded );
3772 return xAlreadyYielded;
3774 /*-----------------------------------------------------------*/
3776 TickType_t xTaskGetTickCount( void )
3780 traceENTER_xTaskGetTickCount();
3782 /* Critical section required if running on a 16 bit processor. */
3783 portTICK_TYPE_ENTER_CRITICAL();
3785 xTicks = xTickCount;
3787 portTICK_TYPE_EXIT_CRITICAL();
3789 traceRETURN_xTaskGetTickCount( xTicks );
3793 /*-----------------------------------------------------------*/
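/* Example usage (an illustrative sketch, not part of the kernel): measure an
 * elapsed period in milliseconds from the tick count using the standard
 * portTICK_PERIOD_MS definition. prvDoWork() is a hypothetical application
 * function.
 *
 * const TickType_t xStart = xTaskGetTickCount();
 * prvDoWork();
 * const TickType_t xElapsedMs = ( xTaskGetTickCount() - xStart ) * portTICK_PERIOD_MS;
 */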
3795 TickType_t xTaskGetTickCountFromISR( void )
3798 UBaseType_t uxSavedInterruptStatus;
3800 traceENTER_xTaskGetTickCountFromISR();
3802 /* RTOS ports that support interrupt nesting have the concept of a maximum
3803 * system call (or maximum API call) interrupt priority. Interrupts that are
3804 * above the maximum system call priority are kept permanently enabled, even
3805 * when the RTOS kernel is in a critical section, but cannot make any calls to
3806 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
3807 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
3808 * failure if a FreeRTOS API function is called from an interrupt that has been
3809 * assigned a priority above the configured maximum system call priority.
3810 * Only FreeRTOS functions that end in FromISR can be called from interrupts
3811 * that have been assigned a priority at or (logically) below the maximum
3812 * system call interrupt priority. FreeRTOS maintains a separate interrupt
3813 * safe API to ensure interrupt entry is as fast and as simple as possible.
3814 * More information (albeit Cortex-M specific) is provided on the following
3815 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
3816 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
3818 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
3820 xReturn = xTickCount;
3822 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
3824 traceRETURN_xTaskGetTickCountFromISR( xReturn );
3828 /*-----------------------------------------------------------*/
3830 UBaseType_t uxTaskGetNumberOfTasks( void )
3832 traceENTER_uxTaskGetNumberOfTasks();
3834 /* A critical section is not required because the variables are of type BaseType_t. */
3836 traceRETURN_uxTaskGetNumberOfTasks( uxCurrentNumberOfTasks );
3838 return uxCurrentNumberOfTasks;
3840 /*-----------------------------------------------------------*/
3842 char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3846 traceENTER_pcTaskGetName( xTaskToQuery );
3848 /* If null is passed in here then the name of the calling task is being queried. */
3850 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3851 configASSERT( pxTCB );
3853 traceRETURN_pcTaskGetName( &( pxTCB->pcTaskName[ 0 ] ) );
3855 return &( pxTCB->pcTaskName[ 0 ] );
3857 /*-----------------------------------------------------------*/
3859 #if ( INCLUDE_xTaskGetHandle == 1 )
3861 #if ( configNUMBER_OF_CORES == 1 )
3862 static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
3863 const char pcNameToQuery[] )
3867 TCB_t * pxReturn = NULL;
3870 BaseType_t xBreakLoop;
3872 /* This function is called with the scheduler suspended. */
3874 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3876 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3880 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3882 /* Check each character in the name looking for a match or mismatch. */
3884 xBreakLoop = pdFALSE;
3886 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
3888 cNextChar = pxNextTCB->pcTaskName[ x ];
3890 if( cNextChar != pcNameToQuery[ x ] )
3892 /* Characters didn't match. */
3893 xBreakLoop = pdTRUE;
3895 else if( cNextChar == ( char ) 0x00 )
3897 /* Both strings terminated, a match must have been found. */
3899 pxReturn = pxNextTCB;
3900 xBreakLoop = pdTRUE;
3904 mtCOVERAGE_TEST_MARKER();
3907 if( xBreakLoop != pdFALSE )
3913 if( pxReturn != NULL )
3915 /* The handle has been found. */
3918 } while( pxNextTCB != pxFirstTCB );
3922 mtCOVERAGE_TEST_MARKER();
3927 #else /* if ( configNUMBER_OF_CORES == 1 ) */
3928 static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
3929 const char pcNameToQuery[] )
3931 TCB_t * pxReturn = NULL;
3934 BaseType_t xBreakLoop;
3935 const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList );
3936 ListItem_t * pxIterator;
3938 /* This function is called with the scheduler suspended. */
3940 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3942 for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) )
3944 TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );
3946 /* Check each character in the name looking for a match or mismatch. */
3948 xBreakLoop = pdFALSE;
3950 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
3952 cNextChar = pxTCB->pcTaskName[ x ];
3954 if( cNextChar != pcNameToQuery[ x ] )
3956 /* Characters didn't match. */
3957 xBreakLoop = pdTRUE;
3959 else if( cNextChar == ( char ) 0x00 )
3961 /* Both strings terminated, a match must have been found. */
3964 xBreakLoop = pdTRUE;
3968 mtCOVERAGE_TEST_MARKER();
3971 if( xBreakLoop != pdFALSE )
3977 if( pxReturn != NULL )
3979 /* The handle has been found. */
3986 mtCOVERAGE_TEST_MARKER();
3991 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
3993 #endif /* INCLUDE_xTaskGetHandle */
3994 /*-----------------------------------------------------------*/
3996 #if ( INCLUDE_xTaskGetHandle == 1 )
3998 TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
4000 UBaseType_t uxQueue = configMAX_PRIORITIES;
4003 traceENTER_xTaskGetHandle( pcNameToQuery );
4005 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
4006 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
4010 /* Search the ready lists. */
4014 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
4018 /* Found the handle. */
4021 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4023 /* Search the delayed lists. */
4026 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
4031 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
4034 #if ( INCLUDE_vTaskSuspend == 1 )
4038 /* Search the suspended list. */
4039 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
4044 #if ( INCLUDE_vTaskDelete == 1 )
4048 /* Search the deleted list. */
4049 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
4054 ( void ) xTaskResumeAll();
4056 traceRETURN_xTaskGetHandle( pxTCB );
4061 #endif /* INCLUDE_xTaskGetHandle */
4062 /*-----------------------------------------------------------*/
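/* Example usage (an illustrative sketch, not part of the kernel): look a task
 * up by the name it was created with, then act on the returned handle. "Tx"
 * is a hypothetical task name. The lookup walks every task list, so it is
 * slow and the handle should be obtained once and cached.
 *
 * TaskHandle_t xTxHandle = xTaskGetHandle( "Tx" );
 *
 * if( xTxHandle != NULL )
 * {
 *     vTaskSuspend( xTxHandle );
 * }
 */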
4064 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
4066 BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
4067 StackType_t ** ppuxStackBuffer,
4068 StaticTask_t ** ppxTaskBuffer )
4073 traceENTER_xTaskGetStaticBuffers( xTask, ppuxStackBuffer, ppxTaskBuffer );
4075 configASSERT( ppuxStackBuffer != NULL );
4076 configASSERT( ppxTaskBuffer != NULL );
4078 pxTCB = prvGetTCBFromHandle( xTask );
4080 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
4082 if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
4084 *ppuxStackBuffer = pxTCB->pxStack;
4085 *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
4088 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
4090 *ppuxStackBuffer = pxTCB->pxStack;
4091 *ppxTaskBuffer = NULL;
4099 #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
4101 *ppuxStackBuffer = pxTCB->pxStack;
4102 *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
4105 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
4107 traceRETURN_xTaskGetStaticBuffers( xReturn );
4112 #endif /* configSUPPORT_STATIC_ALLOCATION */
4113 /*-----------------------------------------------------------*/
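/* Example usage (an illustrative sketch, not part of the kernel): retrieve
 * the application owned buffers of a statically created task, for example so
 * the memory can be reused after the task is deleted. xTask is assumed to be
 * a handle returned by xTaskCreateStatic().
 *
 * StackType_t * puxStackBuffer;
 * StaticTask_t * pxTaskBuffer;
 *
 * if( xTaskGetStaticBuffers( xTask, &puxStackBuffer, &pxTaskBuffer ) == pdTRUE )
 * {
 *     // puxStackBuffer and pxTaskBuffer now point at the buffers supplied at creation.
 * }
 */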
4115 #if ( configUSE_TRACE_FACILITY == 1 )
4117 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
4118 const UBaseType_t uxArraySize,
4119 configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
4121 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
4123 traceENTER_uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime );
4127 /* Is there a space in the array for each task in the system? */
4128 if( uxArraySize >= uxCurrentNumberOfTasks )
4130 /* Fill in a TaskStatus_t structure with information on each
4131 * task in the Ready state. */
4135 uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ) );
4136 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4138 /* Fill in a TaskStatus_t structure with information on each
4139 * task in the Blocked state. */
4140 uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ) );
4141 uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ) );
4143 #if ( INCLUDE_vTaskDelete == 1 )
4145 /* Fill in a TaskStatus_t structure with information on
4146 * each task that has been deleted but not yet cleaned up. */
4147 uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ) );
4151 #if ( INCLUDE_vTaskSuspend == 1 )
4153 /* Fill in a TaskStatus_t structure with information on
4154 * each task in the Suspended state. */
4155 uxTask = ( UBaseType_t ) ( uxTask + prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ) );
4159 #if ( configGENERATE_RUN_TIME_STATS == 1 )
4161 if( pulTotalRunTime != NULL )
4163 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
4164 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
4166 *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
4170 #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
4172 if( pulTotalRunTime != NULL )
4174 *pulTotalRunTime = 0;
4177 #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
4181 mtCOVERAGE_TEST_MARKER();
4184 ( void ) xTaskResumeAll();
4186 traceRETURN_uxTaskGetSystemState( uxTask );
4191 #endif /* configUSE_TRACE_FACILITY */
4192 /*----------------------------------------------------------*/
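/* Example usage (an illustrative sketch, not part of the kernel): take a
 * snapshot of every task in the system. The array is sized using
 * uxTaskGetNumberOfTasks() and allocated from the FreeRTOS heap, so this
 * assumes a heap implementation that supports pvPortMalloc()/vPortFree().
 *
 * UBaseType_t uxArraySize = uxTaskGetNumberOfTasks();
 * TaskStatus_t * pxStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
 * configRUN_TIME_COUNTER_TYPE ulTotalRunTime;
 *
 * if( pxStatusArray != NULL )
 * {
 *     uxArraySize = uxTaskGetSystemState( pxStatusArray, uxArraySize, &ulTotalRunTime );
 *     // ... examine pxStatusArray[ 0 ] to pxStatusArray[ uxArraySize - 1 ] ...
 *     vPortFree( pxStatusArray );
 * }
 */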
4194 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
4196 /* SMP_TODO : This function returns only the idle task handle for core 0.
4197 * Consider adding another function to return the idle task handles of the other cores. */
4198 TaskHandle_t xTaskGetIdleTaskHandle( void )
4200 traceENTER_xTaskGetIdleTaskHandle();
4202 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
4203 * started, then xIdleTaskHandles will be NULL. */
4204 configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) );
4206 traceRETURN_xTaskGetIdleTaskHandle( xIdleTaskHandles[ 0 ] );
4208 return xIdleTaskHandles[ 0 ];
4211 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
4212 /*----------------------------------------------------------*/
4214 /* This conditional compilation should use inequality to 0, not equality to 1.
4215 * This is to ensure vTaskStepTick() is available when user defined low power mode
4216 * implementations require configUSE_TICKLESS_IDLE to be set to a value other than 1. */
4218 #if ( configUSE_TICKLESS_IDLE != 0 )
4220 void vTaskStepTick( TickType_t xTicksToJump )
4222 traceENTER_vTaskStepTick( xTicksToJump );
4224 /* Correct the tick count value after a period during which the tick
4225 * was suppressed. Note this does *not* call the tick hook function for
4226 * each stepped tick. */
4227 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
4229 if( ( xTickCount + xTicksToJump ) == xNextTaskUnblockTime )
4231 /* Arrange for xTickCount to reach xNextTaskUnblockTime in
4232 * xTaskIncrementTick() when the scheduler resumes. This ensures
4233 * that any delayed tasks are resumed at the correct time. */
4234 configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
4235 configASSERT( xTicksToJump != ( TickType_t ) 0 );
4237 /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
4238 taskENTER_CRITICAL();
4242 taskEXIT_CRITICAL();
4247 mtCOVERAGE_TEST_MARKER();
4250 xTickCount += xTicksToJump;
4252 traceINCREASE_TICK_COUNT( xTicksToJump );
4253 traceRETURN_vTaskStepTick();
4256 #endif /* configUSE_TICKLESS_IDLE */
4257 /*----------------------------------------------------------*/
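/* Example usage (an illustrative sketch, not part of the kernel):
 * vTaskStepTick() is normally called from a port's
 * portSUPPRESS_TICKS_AND_SLEEP() implementation to credit the ticks that
 * passed while the tick interrupt was suppressed. The function name and
 * prvSleepUntilInterrupt() are hypothetical port details.
 *
 * void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
 * {
 *     // Stop the tick interrupt, sleep, then establish how long was actually slept.
 *     TickType_t xCompletedTicks = prvSleepUntilInterrupt( xExpectedIdleTime );
 *
 *     vTaskStepTick( xCompletedTicks );
 *
 *     // Restart the tick interrupt before returning.
 * }
 */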
4259 BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
4261 BaseType_t xYieldOccurred;
4263 traceENTER_xTaskCatchUpTicks( xTicksToCatchUp );
4265 /* Must not be called with the scheduler suspended as the implementation
4266 * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
4267 configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
4269 /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
4270 * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
4273 /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
4274 taskENTER_CRITICAL();
4276 xPendedTicks += xTicksToCatchUp;
4278 taskEXIT_CRITICAL();
4279 xYieldOccurred = xTaskResumeAll();
4281 traceRETURN_xTaskCatchUpTicks( xYieldOccurred );
4283 return xYieldOccurred;
4285 /*----------------------------------------------------------*/
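/* Example usage (an illustrative sketch, not part of the kernel): after the
 * application has deliberately held the tick interrupt off - here around a
 * hypothetical prvDoFlashWrite() - xTaskCatchUpTicks() brings the kernel's
 * notion of time back in line. xTicksMissed must come from a clock that kept
 * running, for example a free running hardware timer.
 *
 * portDISABLE_INTERRUPTS();
 * prvDoFlashWrite();
 * portENABLE_INTERRUPTS();
 *
 * ( void ) xTaskCatchUpTicks( xTicksMissed );
 */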
4287 #if ( INCLUDE_xTaskAbortDelay == 1 )
4289 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
4291 TCB_t * pxTCB = xTask;
4294 traceENTER_xTaskAbortDelay( xTask );
4296 configASSERT( pxTCB );
4300 /* A task can only be prematurely removed from the Blocked state if
4301 * it is actually in the Blocked state. */
4302 if( eTaskGetState( xTask ) == eBlocked )
4306 /* Remove the reference to the task from the blocked list. An
4307 * interrupt won't touch the xStateListItem because the
4308 * scheduler is suspended. */
4309 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4311 /* Is the task waiting on an event also? If so remove it from
4312 * the event list too. Interrupts can touch the event list item,
4313 * even though the scheduler is suspended, so a critical section is used. */
4315 taskENTER_CRITICAL();
4317 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
4319 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
4321 /* This lets the task know it was forcibly removed from the
4322 * blocked state so it should not re-evaluate its block time and
4323 * then block again. */
4324 pxTCB->ucDelayAborted = pdTRUE;
4328 mtCOVERAGE_TEST_MARKER();
4331 taskEXIT_CRITICAL();
4333 /* Place the unblocked task into the appropriate ready list. */
4334 prvAddTaskToReadyList( pxTCB );
4336 /* A task being unblocked cannot cause an immediate context
4337 * switch if preemption is turned off. */
4338 #if ( configUSE_PREEMPTION == 1 )
4340 #if ( configNUMBER_OF_CORES == 1 )
4342 /* Preemption is on, but a context switch should only be
4343 * performed if the unblocked task has a priority that is
4344 * higher than the currently executing task. */
4345 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4347 /* Pend the yield to be performed when the scheduler
4348 * is unsuspended. */
4349 xYieldPendings[ 0 ] = pdTRUE;
4353 mtCOVERAGE_TEST_MARKER();
4356 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4358 taskENTER_CRITICAL();
4360 prvYieldForTask( pxTCB );
4362 taskEXIT_CRITICAL();
4364 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4366 #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4373 ( void ) xTaskResumeAll();
4375 traceRETURN_xTaskAbortDelay( xReturn );
4380 #endif /* INCLUDE_xTaskAbortDelay */
4381 /*----------------------------------------------------------*/
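/* Example usage (an illustrative sketch, not part of the kernel): force a
 * task out of the Blocked state early, for example so it can observe a
 * shutdown flag instead of completing a long vTaskDelay(). xWorkerHandle is
 * a hypothetical handle to a task that may be blocked.
 *
 * if( xTaskAbortDelay( xWorkerHandle ) == pdPASS )
 * {
 *     // The worker was in the Blocked state and is now Ready - its blocking
 *     // call will return before its timeout expires.
 * }
 */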
4383 BaseType_t xTaskIncrementTick( void )
4386 TickType_t xItemValue;
4387 BaseType_t xSwitchRequired = pdFALSE;
4389 traceENTER_xTaskIncrementTick();
4391 #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
4392 BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
4393 #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
4395 /* Called by the portable layer each time a tick interrupt occurs.
4396 * Increments the tick then checks to see if the new tick value will cause any
4397 * tasks to be unblocked. */
4398 traceTASK_INCREMENT_TICK( xTickCount );
4400 /* Tick increment should occur on every kernel timer event. Core 0 has the
4401 * responsibility to increment the tick, or increment the pended ticks if the
4402 * scheduler is suspended. If pended ticks is greater than zero, the core that
4403 * calls xTaskResumeAll has the responsibility to increment the tick. */
4404 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
4406 /* Minor optimisation. The tick count cannot change in this block. */
4408 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
4410 /* Increment the RTOS tick, switching the delayed and overflowed
4411 * delayed lists if it wraps to 0. */
4412 xTickCount = xConstTickCount;
4414 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
4416 taskSWITCH_DELAYED_LISTS();
4420 mtCOVERAGE_TEST_MARKER();
4423 /* See if this tick has made a timeout expire. Tasks are stored in
4424 * the queue in the order of their wake time - meaning once one task
4425 * has been found whose block time has not expired there is no need to
4426 * look any further down the list. */
4427 if( xConstTickCount >= xNextTaskUnblockTime )
4431 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4433 /* The delayed list is empty. Set xNextTaskUnblockTime
4434 * to the maximum possible value so it is extremely unlikely that the
4436 * if( xTickCount >= xNextTaskUnblockTime ) test will pass
4437 * next time through. */
4438 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4443 /* The delayed list is not empty, get the value of the
4444 * item at the head of the delayed list. This is the time
4445 * at which the task at the head of the delayed list must
4446 * be removed from the Blocked state. */
4447 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
4448 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
4450 if( xConstTickCount < xItemValue )
4452 /* It is not time to unblock this item yet, but the
4453 * item value is the time at which the task at the head
4454 * of the blocked list must be removed from the Blocked
4455 * state - so record the item value in
4456 * xNextTaskUnblockTime. */
4457 xNextTaskUnblockTime = xItemValue;
4458 break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
4462 mtCOVERAGE_TEST_MARKER();
4465 /* It is time to remove the item from the Blocked state. */
4466 listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
4468 /* Is the task waiting on an event also? If so remove
4469 * it from the event list. */
4470 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
4472 listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
4476 mtCOVERAGE_TEST_MARKER();
4479 /* Place the unblocked task into the appropriate ready list. */
4481 prvAddTaskToReadyList( pxTCB );
4483 /* A task being unblocked cannot cause an immediate
4484 * context switch if preemption is turned off. */
4485 #if ( configUSE_PREEMPTION == 1 )
4487 #if ( configNUMBER_OF_CORES == 1 )
4489 /* Preemption is on, but a context switch should
4490 * only be performed if the unblocked task's
4491 * priority is higher than the currently executing task.
4493 * The case of equal priority tasks sharing
4494 * processing time (which happens when both
4495 * preemption and time slicing are on) is handled below. */
4497 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4499 xSwitchRequired = pdTRUE;
4503 mtCOVERAGE_TEST_MARKER();
4506 #else /* #if( configNUMBER_OF_CORES == 1 ) */
4508 prvYieldForTask( pxTCB );
4510 #endif /* #if( configNUMBER_OF_CORES == 1 ) */
4512 #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4517 /* Tasks of equal priority to the currently running task will share
4518 * processing time (time slice) if preemption is on, and the application
4519 * writer has not explicitly turned time slicing off. */
4520 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
4522 #if ( configNUMBER_OF_CORES == 1 )
4524 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
4526 xSwitchRequired = pdTRUE;
4530 mtCOVERAGE_TEST_MARKER();
4533 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4537 for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ )
4539 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1 )
4541 xYieldRequiredForCore[ xCoreID ] = pdTRUE;
4545 mtCOVERAGE_TEST_MARKER();
4549 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4551 #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
4553 #if ( configUSE_TICK_HOOK == 1 )
4555 /* Guard against the tick hook being called when the pended tick
4556 * count is being unwound (when the scheduler is being unlocked). */
4557 if( xPendedTicks == ( TickType_t ) 0 )
4559 vApplicationTickHook();
4563 mtCOVERAGE_TEST_MARKER();
4566 #endif /* configUSE_TICK_HOOK */
4568 #if ( configUSE_PREEMPTION == 1 )
4570 #if ( configNUMBER_OF_CORES == 1 )
4572 /* For single core the core ID is always 0. */
4573 if( xYieldPendings[ 0 ] != pdFALSE )
4575 xSwitchRequired = pdTRUE;
4579 mtCOVERAGE_TEST_MARKER();
4582 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
4584 BaseType_t xCoreID, xCurrentCoreID;
4585 xCurrentCoreID = ( BaseType_t ) portGET_CORE_ID();
4587 for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
4589 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
4590 if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
4593 if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) )
4595 if( xCoreID == xCurrentCoreID )
4597 xSwitchRequired = pdTRUE;
4601 prvYieldCore( xCoreID );
4606 mtCOVERAGE_TEST_MARKER();
4611 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
4613 #endif /* #if ( configUSE_PREEMPTION == 1 ) */
4619 /* The tick hook gets called at regular intervals, even if the
4620 * scheduler is locked. */
4621 #if ( configUSE_TICK_HOOK == 1 )
4623 vApplicationTickHook();
4628 traceRETURN_xTaskIncrementTick( xSwitchRequired );
4630 return xSwitchRequired;
4632 /*-----------------------------------------------------------*/
4634 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
4636 void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
4637 TaskHookFunction_t pxHookFunction )
4641 traceENTER_vTaskSetApplicationTaskTag( xTask, pxHookFunction );
4643 /* If xTask is NULL then it is the task hook of the calling task that is being set. */
4647 xTCB = ( TCB_t * ) pxCurrentTCB;
4654 /* Save the hook function in the TCB. A critical section is required as
4655 * the value can be accessed from an interrupt. */
4656 taskENTER_CRITICAL();
4658 xTCB->pxTaskTag = pxHookFunction;
4660 taskEXIT_CRITICAL();
4662 traceRETURN_vTaskSetApplicationTaskTag();
4665 #endif /* configUSE_APPLICATION_TASK_TAG */
4666 /*-----------------------------------------------------------*/
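/* Example usage (an illustrative sketch, not part of the kernel): store an
 * application hook in the calling task's tag, then invoke it later through
 * xTaskCallApplicationTaskHook(). prvTaskHook() is a hypothetical function
 * matching the TaskHookFunction_t prototype.
 *
 * static BaseType_t prvTaskHook( void * pvParameter )
 * {
 *     ( void ) pvParameter;
 *     // Perform an application defined action here.
 *     return 0;
 * }
 *
 * vTaskSetApplicationTaskTag( NULL, prvTaskHook );     // Tag the calling task.
 * ( void ) xTaskCallApplicationTaskHook( NULL, NULL ); // Later: call the hook.
 */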
4668 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
4670 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
4673 TaskHookFunction_t xReturn;
4675 traceENTER_xTaskGetApplicationTaskTag( xTask );
4677 /* If xTask is NULL then the tag of the calling task is being queried. */
4678 pxTCB = prvGetTCBFromHandle( xTask );
4680 /* Read the hook function from the TCB. A critical section is required as
4681 * the value can be accessed from an interrupt. */
4682 taskENTER_CRITICAL();
4684 xReturn = pxTCB->pxTaskTag;
4686 taskEXIT_CRITICAL();
4688 traceRETURN_xTaskGetApplicationTaskTag( xReturn );
4693 #endif /* configUSE_APPLICATION_TASK_TAG */
4694 /*-----------------------------------------------------------*/
4696 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
4698 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
4701 TaskHookFunction_t xReturn;
4702 UBaseType_t uxSavedInterruptStatus;
4704 traceENTER_xTaskGetApplicationTaskTagFromISR( xTask );
4706 /* If xTask is NULL then the tag of the calling task is being queried. */
4707 pxTCB = prvGetTCBFromHandle( xTask );
4709 /* Read the hook function from the TCB. A critical section is required as
4710 * the value can be accessed from an interrupt. */
4711 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
4713 xReturn = pxTCB->pxTaskTag;
4715 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
4717 traceRETURN_xTaskGetApplicationTaskTagFromISR( xReturn );
4722 #endif /* configUSE_APPLICATION_TASK_TAG */
4723 /*-----------------------------------------------------------*/
4725 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
4727 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
4728 void * pvParameter )
4733 traceENTER_xTaskCallApplicationTaskHook( xTask, pvParameter );
4735 /* If xTask is NULL then we are calling our own task hook. */
4738 xTCB = pxCurrentTCB;
4745 if( xTCB->pxTaskTag != NULL )
4747 xReturn = xTCB->pxTaskTag( pvParameter );
4754 traceRETURN_xTaskCallApplicationTaskHook( xReturn );
4759 #endif /* configUSE_APPLICATION_TASK_TAG */
4760 /*-----------------------------------------------------------*/
4762 #if ( configNUMBER_OF_CORES == 1 )
4763 void vTaskSwitchContext( void )
4765 traceENTER_vTaskSwitchContext();
4767 if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
4769 /* The scheduler is currently suspended - do not allow a context switch. */
4771 xYieldPendings[ 0 ] = pdTRUE;
4775 xYieldPendings[ 0 ] = pdFALSE;
4776 traceTASK_SWITCHED_OUT();
4778 #if ( configGENERATE_RUN_TIME_STATS == 1 )
4780 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
4781 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] );
4783 ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE();
4786 /* Add the amount of time the task has been running to the
4787 * accumulated time so far. The time the task started running was
4788 * stored in ulTaskSwitchedInTime. Note that there is no overflow
4789 * protection here so count values are only valid until the timer
4790 * overflows. The guard against negative values is to protect
4791 * against suspect run time stat counter implementations - which
4792 * are provided by the application, not the kernel. */
4793 if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] )
4795 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] );
4799 mtCOVERAGE_TEST_MARKER();
4802 ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ];
4804 #endif /* configGENERATE_RUN_TIME_STATS */
4806 /* Check for stack overflow, if configured. */
4807 taskCHECK_FOR_STACK_OVERFLOW();
4809 /* Before the currently running task is switched out, save its errno. */
4810 #if ( configUSE_POSIX_ERRNO == 1 )
4812 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
4816 /* Select a new task to run using either the generic C or port
4817 * optimised asm code. */
4818 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
4819 traceTASK_SWITCHED_IN();
4821 /* After the new task is switched in, update the global errno. */
4822 #if ( configUSE_POSIX_ERRNO == 1 )
4824 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
4828 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
4830 /* Switch C-Runtime's TLS Block to point to the TLS
4831 * Block specific to this task. */
4832 configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
4837 traceRETURN_vTaskSwitchContext();
4839 #else /* if ( configNUMBER_OF_CORES == 1 ) */
4840 void vTaskSwitchContext( BaseType_t xCoreID )
4842 traceENTER_vTaskSwitchContext();
4844 /* Acquire both locks:
4845 * - The ISR lock protects the ready list from simultaneous access by
4846 * both other ISRs and tasks.
4847 * - We also take the task lock to pause here in case another core has
4848 * suspended the scheduler. We don't want to simply set xYieldPending
4849 * and move on if another core suspended the scheduler. We should only
4850 * do that if the current core has suspended the scheduler. */
4852 portGET_TASK_LOCK(); /* Must always acquire the task lock first. */
4855 /* vTaskSwitchContext() must never be called from within a critical section.
4856 * This is not necessarily true for single core FreeRTOS, but it is for this SMP port. */
4858 configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 );
4860 if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
4862 /* The scheduler is currently suspended - do not allow a context switch. */
4864 xYieldPendings[ xCoreID ] = pdTRUE;
4868 xYieldPendings[ xCoreID ] = pdFALSE;
4869 traceTASK_SWITCHED_OUT();
4871 #if ( configGENERATE_RUN_TIME_STATS == 1 )
4873 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
4874 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] );
4876 ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE();
4879 /* Add the amount of time the task has been running to the
4880 * accumulated time so far. The time the task started running was
4881 * stored in ulTaskSwitchedInTime. Note that there is no overflow
4882 * protection here so count values are only valid until the timer
4883 * overflows. The guard against negative values is to protect
4884 * against suspect run time stat counter implementations - which
4885 * are provided by the application, not the kernel. */
4886 if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] )
4888 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] );
4892 mtCOVERAGE_TEST_MARKER();
4895 ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ];
4897 #endif /* configGENERATE_RUN_TIME_STATS */
4899 /* Check for stack overflow, if configured. */
4900 taskCHECK_FOR_STACK_OVERFLOW();
4902 /* Before the currently running task is switched out, save its errno. */
4903 #if ( configUSE_POSIX_ERRNO == 1 )
4905 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
4909 /* Select a new task to run. */
4910 taskSELECT_HIGHEST_PRIORITY_TASK( xCoreID );
4911 traceTASK_SWITCHED_IN();
4913 /* After the new task is switched in, update the global errno. */
4914 #if ( configUSE_POSIX_ERRNO == 1 )
4916 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
4920 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
4922 /* Switch C-Runtime's TLS Block to point to the TLS
4923 * Block specific to this task. */
4924 configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
4929 portRELEASE_ISR_LOCK();
4930 portRELEASE_TASK_LOCK();
4932 traceRETURN_vTaskSwitchContext();
4934 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
4935 /*-----------------------------------------------------------*/
4937 void vTaskPlaceOnEventList( List_t * const pxEventList,
4938 const TickType_t xTicksToWait )
4940 traceENTER_vTaskPlaceOnEventList( pxEventList, xTicksToWait );
4942 configASSERT( pxEventList );
4944 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
4945 * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
4947 /* Place the event list item of the TCB in the appropriate event list.
4948 * This is placed in the list in priority order so the highest priority task
4949 * is the first to be woken by the event.
4951 * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
4952 * Normally, the xItemValue of a TCB's ListItem_t members is:
4953 * xItemValue = ( configMAX_PRIORITIES - uxPriority )
4954 * Therefore, the event list is sorted in descending priority order.
4956 * The queue that contains the event list is locked, preventing
4957 * simultaneous access from interrupts. */
4958 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
4960 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4962 traceRETURN_vTaskPlaceOnEventList();
4964 /*-----------------------------------------------------------*/
4966 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
4967 const TickType_t xItemValue,
4968 const TickType_t xTicksToWait )
4970 traceENTER_vTaskPlaceOnUnorderedEventList( pxEventList, xItemValue, xTicksToWait );
4972 configASSERT( pxEventList );
4974 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
4975 * the event groups implementation. */
4976 configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
4978 /* Store the item value in the event list item. It is safe to access the
4979 * event list item here as interrupts won't access the event list item of a
4980 * task that is not in the Blocked state. */
4981 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
4983 /* Place the event list item of the TCB at the end of the appropriate event
4984 * list. It is safe to access the event list here because it is part of an
4985 * event group implementation - and interrupts don't access event groups
4986 * directly (instead they access them indirectly by pending function calls to
4987 * the task level). */
4988 listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
4990 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4992 traceRETURN_vTaskPlaceOnUnorderedEventList();
4994 /*-----------------------------------------------------------*/
4996 #if ( configUSE_TIMERS == 1 )
4998 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
4999 TickType_t xTicksToWait,
5000 const BaseType_t xWaitIndefinitely )
5002 traceENTER_vTaskPlaceOnEventListRestricted( pxEventList, xTicksToWait, xWaitIndefinitely );
5004 configASSERT( pxEventList );
5006 /* This function should not be called by application code hence the
5007 * 'Restricted' in its name. It is not part of the public API. It is
5008 * designed for use by kernel code, and has special calling requirements -
5009 * it should be called with the scheduler suspended. */
5012 /* Place the event list item of the TCB in the appropriate event list.
5013 * In this case it is assumed that this is the only task that is going to
5014 * be waiting on this event list, so the faster vListInsertEnd() function
5015 * can be used in place of vListInsert. */
5016 listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );
5018 /* If the task should block indefinitely then set the block time to a
5019 * value that will be recognised as an indefinite delay inside the
5020 * prvAddCurrentTaskToDelayedList() function. */
5021 if( xWaitIndefinitely != pdFALSE )
5023 xTicksToWait = portMAX_DELAY;
5026 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
5027 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
5029 traceRETURN_vTaskPlaceOnEventListRestricted();
5032 #endif /* configUSE_TIMERS */
5033 /*-----------------------------------------------------------*/
5035 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
5037 TCB_t * pxUnblockedTCB;
5040 traceENTER_xTaskRemoveFromEventList( pxEventList );
5042 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
5043 * called from a critical section within an ISR. */
5045 /* The event list is sorted in priority order, so the first in the list can
5046 * be removed as it is known to be the highest priority. Remove the TCB from
5047 * the delayed list, and add it to the ready list.
5049 * If an event is for a queue that is locked then this function will never
5050 * get called - the lock count on the queue will get modified instead. This
5051 * means exclusive access to the event list is guaranteed here.
5053 * This function assumes that a check has already been made to ensure that
5054 * pxEventList is not empty. */
5055 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
5056 configASSERT( pxUnblockedTCB );
5057 listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );
5059 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
5061 listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
5062 prvAddTaskToReadyList( pxUnblockedTCB );
5064 #if ( configUSE_TICKLESS_IDLE != 0 )
5066 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
5067 * might be set to the blocked task's time out time. If the task is
5068 * unblocked for a reason other than a timeout xNextTaskUnblockTime is
5069 * normally left unchanged, because it is automatically reset to a new
5070 * value when the tick count equals xNextTaskUnblockTime. However if
5071 * tickless idling is used it might be more important to enter sleep mode
5072 * at the earliest possible time - so reset xNextTaskUnblockTime here to
5073 * ensure it is updated at the earliest possible time. */
5074 prvResetNextTaskUnblockTime();
5080 /* The delayed and ready lists cannot be accessed, so hold this task
5081 * pending until the scheduler is resumed. */
5082 listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
5085 #if ( configNUMBER_OF_CORES == 1 )
5087 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
5089 /* Return true if the task removed from the event list has a higher
5090 * priority than the calling task. This allows the calling task to know if
5091 * it should force a context switch now. */
5094 /* Mark that a yield is pending in case the user is not using the
5095 * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
5096 xYieldPendings[ 0 ] = pdTRUE;
5103 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
5107 #if ( configUSE_PREEMPTION == 1 )
5109 prvYieldForTask( pxUnblockedTCB );
5111 if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
5116 #endif /* #if ( configUSE_PREEMPTION == 1 ) */
5118 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
5120 traceRETURN_xTaskRemoveFromEventList( xReturn );
5123 /*-----------------------------------------------------------*/
5125 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
5126 const TickType_t xItemValue )
5128 TCB_t * pxUnblockedTCB;
5130 traceENTER_vTaskRemoveFromUnorderedEventList( pxEventListItem, xItemValue );
5132 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
5133 * the event flags implementation. */
5134 configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
5136 /* Store the new item value in the event list. */
5137 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
5139 /* Remove the event list from the event flag. Interrupts do not access event flags. */
5141 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
5142 configASSERT( pxUnblockedTCB );
5143 listREMOVE_ITEM( pxEventListItem );
5145 #if ( configUSE_TICKLESS_IDLE != 0 )
5147 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
5148 * might be set to the blocked task's time out time. If the task is
5149 * unblocked for a reason other than a timeout xNextTaskUnblockTime is
5150 * normally left unchanged, because it is automatically reset to a new
5151 * value when the tick count equals xNextTaskUnblockTime. However if
5152 * tickless idling is used it might be more important to enter sleep mode
5153 * at the earliest possible time - so reset xNextTaskUnblockTime here to
5154 * ensure it is updated at the earliest possible time. */
5155 prvResetNextTaskUnblockTime();
5159 /* Remove the task from the delayed list and add it to the ready list. The
5160 * scheduler is suspended so interrupts will not be accessing the ready lists. */
5162 listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
5163 prvAddTaskToReadyList( pxUnblockedTCB );
5165 #if ( configNUMBER_OF_CORES == 1 )
5167 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
5169 /* The unblocked task has a priority above that of the calling task, so
5170 * a context switch is required. This function is called with the
5171 * scheduler suspended so xYieldPending is set so the context switch
5172 * occurs as soon as the scheduler is resumed (unsuspended). */
5173 xYieldPendings[ 0 ] = pdTRUE;
5176 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
5178 #if ( configUSE_PREEMPTION == 1 )
5180 taskENTER_CRITICAL();
5182 prvYieldForTask( pxUnblockedTCB );
5184 taskEXIT_CRITICAL();
5188 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
5190 traceRETURN_vTaskRemoveFromUnorderedEventList();
5192 /*-----------------------------------------------------------*/
5194 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
5196 traceENTER_vTaskSetTimeOutState( pxTimeOut );
5198 configASSERT( pxTimeOut );
5199 taskENTER_CRITICAL();
5201 pxTimeOut->xOverflowCount = xNumOfOverflows;
5202 pxTimeOut->xTimeOnEntering = xTickCount;
5204 taskEXIT_CRITICAL();
5206 traceRETURN_vTaskSetTimeOutState();
5208 /*-----------------------------------------------------------*/
5210 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
5212 traceENTER_vTaskInternalSetTimeOutState( pxTimeOut );
5214 /* For internal use only as it does not use a critical section. */
5215 pxTimeOut->xOverflowCount = xNumOfOverflows;
5216 pxTimeOut->xTimeOnEntering = xTickCount;
5218 traceRETURN_vTaskInternalSetTimeOutState();
5220 /*-----------------------------------------------------------*/
5222 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
5223 TickType_t * const pxTicksToWait )
5227 traceENTER_xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait );
5229 configASSERT( pxTimeOut );
5230 configASSERT( pxTicksToWait );
5232 taskENTER_CRITICAL();
5234 /* Minor optimisation. The tick count cannot change in this block. */
5235 const TickType_t xConstTickCount = xTickCount;
5236 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
5238 #if ( INCLUDE_xTaskAbortDelay == 1 )
5239 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
5241 /* The delay was aborted, which is not the same as a time out,
5242 * but has the same result. */
5243 pxCurrentTCB->ucDelayAborted = pdFALSE;
5249 #if ( INCLUDE_vTaskSuspend == 1 )
5250 if( *pxTicksToWait == portMAX_DELAY )
5252 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
5253 * specified is the maximum block time then the task should block
5254 * indefinitely, and therefore never time out. */
5260 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
5262 /* The tick count is greater than the time at which
5263 * vTaskSetTimeout() was called, but has also overflowed since
5264 * vTaskSetTimeOut() was called. It must have wrapped all the way
5265 * around and gone past again, meaning more than the timeout period has passed since vTaskSetTimeOut() was called. */
5268 *pxTicksToWait = ( TickType_t ) 0;
5270 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
5272 /* Not a genuine timeout. Adjust parameters for time remaining. */
5273 *pxTicksToWait -= xElapsedTime;
5274 vTaskInternalSetTimeOutState( pxTimeOut );
5279 *pxTicksToWait = ( TickType_t ) 0;
5283 taskEXIT_CRITICAL();
5285 traceRETURN_xTaskCheckForTimeOut( xReturn );
5289 /*-----------------------------------------------------------*/
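/* Example usage (an illustrative sketch, not part of the kernel): the
 * vTaskSetTimeOutState()/xTaskCheckForTimeOut() pattern keeps blocking for
 * whatever remains of an overall timeout, however many times the wait wakes
 * early. prvWaitForEvent() is a hypothetical blocking call.
 *
 * TimeOut_t xTimeOut;
 * TickType_t xTicksToWait = pdMS_TO_TICKS( 100 );
 *
 * vTaskSetTimeOutState( &xTimeOut );
 *
 * while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
 * {
 *     if( prvWaitForEvent( xTicksToWait ) == pdTRUE )
 *     {
 *         break; // The event arrived before the timeout expired.
 *     }
 *
 *     // Otherwise xTicksToWait now holds the time remaining, so loop and wait again.
 * }
 */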
5291 void vTaskMissedYield( void )
5293 traceENTER_vTaskMissedYield();
5295 /* Must be called from within a critical section. */
5296 xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
5298 traceRETURN_vTaskMissedYield();
5300 /*-----------------------------------------------------------*/
5302 #if ( configUSE_TRACE_FACILITY == 1 )
5304 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
5306 UBaseType_t uxReturn;
5307 TCB_t const * pxTCB;
5309 traceENTER_uxTaskGetTaskNumber( xTask );
5314 uxReturn = pxTCB->uxTaskNumber;
5321 traceRETURN_uxTaskGetTaskNumber( uxReturn );
5326 #endif /* configUSE_TRACE_FACILITY */
5327 /*-----------------------------------------------------------*/
5329 #if ( configUSE_TRACE_FACILITY == 1 )
5331 void vTaskSetTaskNumber( TaskHandle_t xTask,
5332 const UBaseType_t uxHandle )
5336 traceENTER_vTaskSetTaskNumber( xTask, uxHandle );
5341 pxTCB->uxTaskNumber = uxHandle;
5344 traceRETURN_vTaskSetTaskNumber();
5347 #endif /* configUSE_TRACE_FACILITY */
5348 /*-----------------------------------------------------------*/
5351 * -----------------------------------------------------------
5352 * The MinimalIdle task.
5353 * ----------------------------------------------------------
5355 * The minimal idle task is used for all the additional cores in an SMP
5356 * system. There must be only 1 idle task and the rest are minimal idle tasks.
5359 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5360 * language extensions. The equivalent prototype for this function is:
5362 * void prvMinimalIdleTask( void *pvParameters );
5365 #if ( configNUMBER_OF_CORES > 1 )
5366 static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters )
5368 ( void ) pvParameters;
5372 for( ; INFINITE_LOOP(); )
5374 #if ( configUSE_PREEMPTION == 0 )
5376 /* If we are not using preemption we keep forcing a task switch to
5377 * see if any other task has become available. If we are using
5378 * preemption we don't need to do this as any task becoming available
5379 * will automatically get the processor anyway. */
5382 #endif /* configUSE_PREEMPTION */
5384 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
5386 /* When using preemption tasks of equal priority will be
5387 * timesliced. If a task that is sharing the idle priority is ready
5388 * to run then the idle task should yield before the end of the timeslice.
5391 * A critical region is not required here as we are just reading from
5392 * the list, and an occasional incorrect value will not matter. If
5393 * the ready list at the idle priority contains one more task than the
5394 * number of idle tasks, which is equal to the configured number of cores,
5395 * then a task other than the idle task is ready to execute. */
5396 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
5402 mtCOVERAGE_TEST_MARKER();
5405 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
5407 #if ( configUSE_MINIMAL_IDLE_HOOK == 1 )
5409 /* Call the user defined function from within the idle task. This
5410 * allows the application designer to add background functionality
5411 * without the overhead of a separate task.
5413 * This hook is intended to manage core activity such as disabling cores that go idle.
5415 * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
5416 * CALL A FUNCTION THAT MIGHT BLOCK. */
5417 vApplicationMinimalIdleHook();
5419 #endif /* configUSE_MINIMAL_IDLE_HOOK */
5422 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
5425 * -----------------------------------------------------------
5426 * The Idle task.
5427 * ----------------------------------------------------------
5429 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
5430 * language extensions. The equivalent prototype for this function is:
5432 * void prvIdleTask( void *pvParameters );
5436 static portTASK_FUNCTION( prvIdleTask, pvParameters )
5438 /* Stop warnings. */
5439 ( void ) pvParameters;
5441 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
5442 * SCHEDULER IS STARTED. **/
5444 /* In case a task that has a secure context deletes itself, in which case
5445 * the idle task is responsible for deleting the task's secure context, if any. */
5447 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
5449 #if ( configNUMBER_OF_CORES > 1 )
5451 /* In SMP, all cores start up in the idle task. This initial yield gets the application tasks started. */
5455 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
5457 for( ; INFINITE_LOOP(); )
5459 /* See if any tasks have deleted themselves - if so then the idle task
5460 * is responsible for freeing the deleted task's TCB and stack. */
5461 prvCheckTasksWaitingTermination();
5463 #if ( configUSE_PREEMPTION == 0 )
5465 /* If we are not using preemption we keep forcing a task switch to
5466 * see if any other task has become available. If we are using
5467 * preemption we don't need to do this as any task becoming available
5468 * will automatically get the processor anyway. */
5471 #endif /* configUSE_PREEMPTION */
5473 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
5475 /* When using preemption tasks of equal priority will be
5476 * timesliced. If a task that is sharing the idle priority is ready
5477 * to run then the idle task should yield before the end of the timeslice.
5480 * A critical region is not required here as we are just reading from
5481 * the list, and an occasional incorrect value will not matter. If
5482 * the ready list at the idle priority contains one more task than the
5483 * number of idle tasks, which is equal to the configured number of cores,
5484 * then a task other than the idle task is ready to execute. */
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
{
    taskYIELD();
}
else
{
    mtCOVERAGE_TEST_MARKER();
}
5494 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
5496 #if ( configUSE_IDLE_HOOK == 1 )
5498 /* Call the user defined function from within the idle task. */
5499 vApplicationIdleHook();
5501 #endif /* configUSE_IDLE_HOOK */
5503 /* This conditional compilation should use inequality to 0, not equality
5504 * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
5505 * user defined low power mode implementations require
5506 * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
5507 #if ( configUSE_TICKLESS_IDLE != 0 )
5509 TickType_t xExpectedIdleTime;
5511 /* It is not desirable to suspend then resume the scheduler on
5512 * each iteration of the idle task. Therefore, a preliminary
5513 * test of the expected idle time is performed without the
 * scheduler suspended. The result here is not necessarily
 * valid. */
5516 xExpectedIdleTime = prvGetExpectedIdleTime();
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
    vTaskSuspendAll();
    {
        /* Now the scheduler is suspended, the expected idle
         * time can be sampled again, and this time its value can
         * be used. */
        configASSERT( xNextTaskUnblockTime >= xTickCount );
5526 xExpectedIdleTime = prvGetExpectedIdleTime();
5528 /* Define the following macro to set xExpectedIdleTime to 0
5529 * if the application does not want
5530 * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
5531 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
    traceLOW_POWER_IDLE_BEGIN();
    portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
    traceLOW_POWER_IDLE_END();
}
else
{
    mtCOVERAGE_TEST_MARKER();
}
}

( void ) xTaskResumeAll();
}
else
{
    mtCOVERAGE_TEST_MARKER();
}
5551 #endif /* configUSE_TICKLESS_IDLE */
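/* Illustrative sketch only - not part of the kernel. An application that
 * wants to veto an imminent sleep can define
 * configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING() in FreeRTOSConfig.h to set
 * the expected idle time to zero, as described above. The helper function
 * xAppSleepAllowed() is hypothetical.
 *
 * extern BaseType_t xAppSleepAllowed( void );
 * #define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xIdleTime ) \
 *     do { if( xAppSleepAllowed() == pdFALSE ) { ( xIdleTime ) = 0; } } while( 0 )
 */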
5553 #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) )
5555 /* Call the user defined function from within the idle task. This
5556 * allows the application designer to add background functionality
5557 * without the overhead of a separate task.
5559 * This hook is intended to manage core activity such as disabling cores that go idle.
5561 * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
5562 * CALL A FUNCTION THAT MIGHT BLOCK. */
5563 vApplicationMinimalIdleHook();
5565 #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */
5568 /*-----------------------------------------------------------*/
5570 #if ( configUSE_TICKLESS_IDLE != 0 )
5572 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
5574 #if ( INCLUDE_vTaskSuspend == 1 )
5575 /* The idle task exists in addition to the application tasks. */
5576 const UBaseType_t uxNonApplicationTasks = 1;
5577 #endif /* INCLUDE_vTaskSuspend */
5579 traceENTER_eTaskConfirmSleepModeStatus();
5581 eSleepModeStatus eReturn = eStandardSleep;
5583 /* This function must be called from a critical section. */
if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
{
    /* A task was made ready while the scheduler was suspended. */
    eReturn = eAbortSleep;
}
else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
{
    /* A yield was pended while the scheduler was suspended. */
    eReturn = eAbortSleep;
}
else if( xPendedTicks != 0 )
{
    /* A tick interrupt has already occurred but was held pending
     * because the scheduler is suspended. */
    eReturn = eAbortSleep;
}
#if ( INCLUDE_vTaskSuspend == 1 )
    else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
    {
        /* If all the tasks are in the suspended list (which might mean they
         * have an infinite block time rather than actually being suspended)
         * then it is safe to turn all clocks off and just wait for external
         * interrupts. */
        eReturn = eNoTasksWaitingTimeout;
    }
#endif /* INCLUDE_vTaskSuspend */
else
{
    mtCOVERAGE_TEST_MARKER();
}
traceRETURN_eTaskConfirmSleepModeStatus( eReturn );

return eReturn;
}
5622 #endif /* configUSE_TICKLESS_IDLE */
5623 /*-----------------------------------------------------------*/
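/* Illustrative sketch only - not part of the kernel. A port supplied
 * portSUPPRESS_TICKS_AND_SLEEP() implementation typically re-checks
 * eTaskConfirmSleepModeStatus() from inside a critical section before
 * actually sleeping. prvSleep(), prvStopTickInterrupt() and
 * prvStartTickInterrupt() are hypothetical port helpers.
 *
 * void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
 * {
 *     prvStopTickInterrupt();
 *     portDISABLE_INTERRUPTS();
 *
 *     if( eTaskConfirmSleepModeStatus() == eAbortSleep )
 *     {
 *         // A task was readied or a tick/yield is pending - do not sleep.
 *     }
 *     else
 *     {
 *         prvSleep( xExpectedIdleTime );        // Enter the low power state.
 *         vTaskStepTick( xExpectedIdleTime );   // Correct the tick count on waking.
 *     }
 *
 *     portENABLE_INTERRUPTS();
 *     prvStartTickInterrupt();
 * }
 */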
5625 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
                                        BaseType_t xIndex,
                                        void * pvValue )
{
    TCB_t * pxTCB;
5633 traceENTER_vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue );
5635 if( ( xIndex >= 0 ) &&
5636 ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
5638 pxTCB = prvGetTCBFromHandle( xTaskToSet );
5639 configASSERT( pxTCB != NULL );
5640 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
5643 traceRETURN_vTaskSetThreadLocalStoragePointer();
5646 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
5647 /*-----------------------------------------------------------*/
5649 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
                                           BaseType_t xIndex )
{
    void * pvReturn = NULL;
    TCB_t * pxTCB;
5657 traceENTER_pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex );
5659 if( ( xIndex >= 0 ) &&
5660 ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
5662 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
5663 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
traceRETURN_pvTaskGetThreadLocalStoragePointer( pvReturn );

return pvReturn;
}
5675 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
5676 /*-----------------------------------------------------------*/
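/* Illustrative usage only - not part of the kernel. Storing and retrieving
 * a per-task pointer at index 0; TaskContext_t is hypothetical and
 * configNUM_THREAD_LOCAL_STORAGE_POINTERS is assumed to be at least 1.
 *
 * typedef struct { uint32_t ulLastError; } TaskContext_t;
 * static TaskContext_t xContext;
 *
 * vTaskSetThreadLocalStoragePointer( NULL, 0, &xContext );   // NULL = calling task.
 * TaskContext_t * pxCtx = pvTaskGetThreadLocalStoragePointer( NULL, 0 );
 */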
5678 #if ( portUSING_MPU_WRAPPERS == 1 )
5680 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
const MemoryRegion_t * const pxRegions )
{
    TCB_t * pxTCB;
5685 traceENTER_vTaskAllocateMPURegions( xTaskToModify, pxRegions );
5687 /* If null is passed in here then we are modifying the MPU settings of
5688 * the calling task. */
5689 pxTCB = prvGetTCBFromHandle( xTaskToModify );
5691 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 );
5693 traceRETURN_vTaskAllocateMPURegions();
5696 #endif /* portUSING_MPU_WRAPPERS */
5697 /*-----------------------------------------------------------*/
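/* Illustrative usage only - not part of the kernel. Granting the calling
 * task read-only access to a hypothetical buffer through the first
 * configurable MPU region; unused regions are left zeroed. The region
 * parameter macros vary by port.
 *
 * static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 512 ) ) );
 *
 * MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ] = { 0 };
 * xRegions[ 0 ].pvBaseAddress   = ucSharedBuffer;
 * xRegions[ 0 ].ulLengthInBytes = sizeof( ucSharedBuffer );
 * xRegions[ 0 ].ulParameters    = portMPU_REGION_READ_ONLY;
 *
 * vTaskAllocateMPURegions( NULL, xRegions );   // NULL modifies the calling task.
 */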
5699 static void prvInitialiseTaskLists( void )
5701 UBaseType_t uxPriority;
5703 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
5705 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
5708 vListInitialise( &xDelayedTaskList1 );
5709 vListInitialise( &xDelayedTaskList2 );
5710 vListInitialise( &xPendingReadyList );
5712 #if ( INCLUDE_vTaskDelete == 1 )
5714 vListInitialise( &xTasksWaitingTermination );
5716 #endif /* INCLUDE_vTaskDelete */
5718 #if ( INCLUDE_vTaskSuspend == 1 )
5720 vListInitialise( &xSuspendedTaskList );
5722 #endif /* INCLUDE_vTaskSuspend */
/* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
 * using list2. */
5726 pxDelayedTaskList = &xDelayedTaskList1;
5727 pxOverflowDelayedTaskList = &xDelayedTaskList2;
5729 /*-----------------------------------------------------------*/
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
#if ( configNUMBER_OF_CORES == 1 )
{
    taskENTER_CRITICAL();
    {
        pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
        --uxCurrentNumberOfTasks;
        --uxDeletedTasksWaitingCleanUp;
    }
    taskEXIT_CRITICAL();

    prvDeleteTCB( pxTCB );
}
#else /* #if( configNUMBER_OF_CORES == 1 ) */
{
    pxTCB = NULL;

    taskENTER_CRITICAL();
    {
5764 /* For SMP, multiple idles can be running simultaneously
5765 * and we need to check that other idles did not cleanup while we were
5766 * waiting to enter the critical section. */
if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
{
5769 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING )
{
5773 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5774 --uxCurrentNumberOfTasks;
5775 --uxDeletedTasksWaitingCleanUp;
}
else
{
    /* The TCB to be deleted still has not yet been switched out
     * by the scheduler, so we will just exit this loop early and
     * try again next time. */
    taskEXIT_CRITICAL();
    break;
}
}
}
taskEXIT_CRITICAL();

if( pxTCB != NULL )
{
    prvDeleteTCB( pxTCB );
}
}
#endif /* #if( configNUMBER_OF_CORES == 1 ) */
}
}
#endif /* INCLUDE_vTaskDelete */
}
5799 /*-----------------------------------------------------------*/
5801 #if ( configUSE_TRACE_FACILITY == 1 )
5803 void vTaskGetInfo( TaskHandle_t xTask,
5804 TaskStatus_t * pxTaskStatus,
BaseType_t xGetFreeStackSpace,
eTaskState eState )
{
    TCB_t * pxTCB;
5810 traceENTER_vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState );
/* If xTask is NULL then get the state of the calling task. */
5813 pxTCB = prvGetTCBFromHandle( xTask );
5815 pxTaskStatus->xHandle = pxTCB;
5816 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
5817 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
5818 pxTaskStatus->pxStackBase = pxTCB->pxStack;
#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
    pxTaskStatus->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
    pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
#endif
5823 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
#if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
{
    pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
}
#endif

#if ( configUSE_MUTEXES == 1 )
{
    pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
}
#else
{
    pxTaskStatus->uxBasePriority = 0;
}
#endif

#if ( configGENERATE_RUN_TIME_STATS == 1 )
{
    pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
}
#else
{
    pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
}
#endif
5851 /* Obtaining the task state is a little fiddly, so is only done if the
5852 * value of eState passed into this function is eInvalid - otherwise the
5853 * state is just set to whatever is passed in. */
if( eState != eInvalid )
{
    if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
    {
        pxTaskStatus->eCurrentState = eRunning;
    }
    else
    {
        pxTaskStatus->eCurrentState = eState;
5864 #if ( INCLUDE_vTaskSuspend == 1 )
5866 /* If the task is in the suspended list then there is a
5867 * chance it is actually just blocked indefinitely - so really
5868 * it should be reported as being in the Blocked state. */
if( eState == eSuspended )
{
    vTaskSuspendAll();
    {
        if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
        {
            pxTaskStatus->eCurrentState = eBlocked;
        }
    }
    ( void ) xTaskResumeAll();
}
5881 #endif /* INCLUDE_vTaskSuspend */
/* A task can be in the pending ready list and in another state list
 * at the same time. Such a task is in the Ready state regardless of
 * which other state list it appears in. */
taskENTER_CRITICAL();
{
    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
    {
        pxTaskStatus->eCurrentState = eReady;
    }
}
taskEXIT_CRITICAL();
}
}
else
{
    pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
}
5901 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
5902 * parameter is provided to allow it to be skipped. */
if( xGetFreeStackSpace != pdFALSE )
{
    #if ( portSTACK_GROWTH > 0 )
    {
        pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
    }
    #else
    {
        pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
    }
    #endif
}
else
{
    pxTaskStatus->usStackHighWaterMark = 0;
}
5920 traceRETURN_vTaskGetInfo();
5923 #endif /* configUSE_TRACE_FACILITY */
5924 /*-----------------------------------------------------------*/
5926 #if ( configUSE_TRACE_FACILITY == 1 )
static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                 List_t * pxList,
                                                 eTaskState eState )
{
5932 configLIST_VOLATILE TCB_t * pxNextTCB;
5933 configLIST_VOLATILE TCB_t * pxFirstTCB;
5934 UBaseType_t uxTask = 0;
5936 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
5938 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
/* Populate a TaskStatus_t structure within the
 * pxTaskStatusArray array for each task that is referenced from
 * pxList. See the definition of TaskStatus_t in task.h for the
 * meaning of each TaskStatus_t structure member. */
do
{
5946 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
uxTask++;
} while( pxNextTCB != pxFirstTCB );
}
else
{
    mtCOVERAGE_TEST_MARKER();
}

return uxTask;
}
5959 #endif /* configUSE_TRACE_FACILITY */
5960 /*-----------------------------------------------------------*/
5962 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
{
    uint32_t ulCount = 0U;

    /* Count the bytes, starting from the end of the stack, that still hold
     * the stack fill value and have therefore never been used. */
    while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
    {
        pucStackByte -= portSTACK_GROWTH;
        ulCount++;
    }
5974 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
5976 return ( configSTACK_DEPTH_TYPE ) ulCount;
5979 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
5980 /*-----------------------------------------------------------*/
5982 #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
5984 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
5985 * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
5986 * user to determine the return type. It gets around the problem of the value
5987 * overflowing on 8-bit types without breaking backward compatibility for
5988 * applications that expect an 8-bit return type. */
configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
{
    TCB_t * pxTCB;
5992 uint8_t * pucEndOfStack;
5993 configSTACK_DEPTH_TYPE uxReturn;
5995 traceENTER_uxTaskGetStackHighWaterMark2( xTask );
6004 pxTCB = prvGetTCBFromHandle( xTask );
#if portSTACK_GROWTH < 0
{
    pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
}
#else
{
    pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
}
#endif
6016 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
traceRETURN_uxTaskGetStackHighWaterMark2( uxReturn );

return uxReturn;
}
6023 #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
6024 /*-----------------------------------------------------------*/
6026 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
{
    TCB_t * pxTCB;
6031 uint8_t * pucEndOfStack;
6032 UBaseType_t uxReturn;
6034 traceENTER_uxTaskGetStackHighWaterMark( xTask );
6036 pxTCB = prvGetTCBFromHandle( xTask );
#if portSTACK_GROWTH < 0
{
    pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
}
#else
{
    pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
}
#endif
6048 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
traceRETURN_uxTaskGetStackHighWaterMark( uxReturn );

return uxReturn;
}
6055 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
6056 /*-----------------------------------------------------------*/
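/* Illustrative usage only - not part of the kernel. The high water mark is
 * the smallest amount of stack that has remained unused since the task
 * started, in words rather than bytes, so a value near zero means the task
 * has come close to overflowing its stack. The threshold below is
 * hypothetical.
 *
 * configSTACK_DEPTH_TYPE uxFreeWords = uxTaskGetStackHighWaterMark2( NULL );
 *
 * if( uxFreeWords < 16 )
 * {
 *     // The task has had fewer than 16 words spare - consider a larger stack.
 * }
 */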
6058 #if ( INCLUDE_vTaskDelete == 1 )
6060 static void prvDeleteTCB( TCB_t * pxTCB )
6062 /* This call is required specifically for the TriCore port. It must be
6063 * above the vPortFree() calls. The call is also used by ports/demos that
6064 * want to allocate and clean RAM statically. */
6065 portCLEAN_UP_TCB( pxTCB );
6067 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
6069 /* Free up the memory allocated for the task's TLS Block. */
6070 configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock );
6074 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
/* The task can only have been allocated dynamically - free both
 * the stack and TCB. */
vPortFreeStack( pxTCB->pxStack );
vPortFree( pxTCB );
6081 #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
6083 /* The task could have been allocated statically or dynamically, so
6084 * check what was statically allocated before trying to free the
6086 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
/* Both the stack and TCB were allocated dynamically, so both
 * must be freed. */
vPortFreeStack( pxTCB->pxStack );
vPortFree( pxTCB );
6093 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
/* Only the stack was statically allocated, so the TCB is the
 * only memory that must be freed. */
vPortFree( pxTCB );
6101 /* Neither the stack nor the TCB were allocated dynamically, so
6102 * nothing needs to be freed. */
6103 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
6104 mtCOVERAGE_TEST_MARKER();
6107 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
6110 #endif /* INCLUDE_vTaskDelete */
6111 /*-----------------------------------------------------------*/
6113 static void prvResetNextTaskUnblockTime( void )
6115 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
6117 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
6118 * the maximum possible value so it is extremely unlikely that the
6119 * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
6120 * there is an item in the delayed list. */
xNextTaskUnblockTime = portMAX_DELAY;
}
else
{
6125 /* The new current delayed list is not empty, get the value of
6126 * the item at the head of the delayed list. This is the time at
6127 * which the task at the head of the delayed list should be removed
6128 * from the Blocked state. */
6129 xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
6132 /*-----------------------------------------------------------*/
6134 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 )
6136 #if ( configNUMBER_OF_CORES == 1 )
6137 TaskHandle_t xTaskGetCurrentTaskHandle( void )
6139 TaskHandle_t xReturn;
6141 traceENTER_xTaskGetCurrentTaskHandle();
6143 /* A critical section is not required as this is not called from
6144 * an interrupt and the current TCB will always be the same for any
6145 * individual execution thread. */
6146 xReturn = pxCurrentTCB;
traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

return xReturn;
}
6152 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
6153 TaskHandle_t xTaskGetCurrentTaskHandle( void )
6155 TaskHandle_t xReturn;
6156 UBaseType_t uxSavedInterruptStatus;
6158 traceENTER_xTaskGetCurrentTaskHandle();
6160 uxSavedInterruptStatus = portSET_INTERRUPT_MASK();
6162 xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
6164 portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus );
traceRETURN_xTaskGetCurrentTaskHandle( xReturn );

return xReturn;
}
6171 TaskHandle_t xTaskGetCurrentTaskHandleCPU( BaseType_t xCoreID )
6173 TaskHandle_t xReturn = NULL;
6175 traceENTER_xTaskGetCurrentTaskHandleCPU( xCoreID );
6177 if( taskVALID_CORE_ID( xCoreID ) != pdFALSE )
6179 xReturn = pxCurrentTCBs[ xCoreID ];
traceRETURN_xTaskGetCurrentTaskHandleCPU( xReturn );

return xReturn;
}
6186 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) */
6189 /*-----------------------------------------------------------*/
6191 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
BaseType_t xTaskGetSchedulerState( void )
{
    BaseType_t xReturn;
6197 traceENTER_xTaskGetSchedulerState();
if( xSchedulerRunning == pdFALSE )
{
    xReturn = taskSCHEDULER_NOT_STARTED;
}
else
{
    #if ( configNUMBER_OF_CORES > 1 )
        taskENTER_CRITICAL();
    #endif

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        xReturn = taskSCHEDULER_RUNNING;
    }
    else
    {
        xReturn = taskSCHEDULER_SUSPENDED;
    }

    #if ( configNUMBER_OF_CORES > 1 )
        taskEXIT_CRITICAL();
    #endif
}

traceRETURN_xTaskGetSchedulerState( xReturn );

return xReturn;
}
6228 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
6229 /*-----------------------------------------------------------*/
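/* Illustrative usage only - not part of the kernel. Code that can run
 * before vTaskStartScheduler(), or while the scheduler is suspended, can
 * branch on the scheduler state rather than assuming a blocking call is
 * safe.
 *
 * if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
 * {
 *     vTaskDelay( pdMS_TO_TICKS( 10 ) );   // Safe to block here.
 * }
 * else
 * {
 *     // The scheduler is not running or is suspended - must not block.
 * }
 */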
6231 #if ( configUSE_MUTEXES == 1 )
6233 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
6235 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
6236 BaseType_t xReturn = pdFALSE;
6238 traceENTER_xTaskPriorityInherit( pxMutexHolder );
6240 /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority
6241 * inheritance is not applied in this scenario. */
6242 if( pxMutexHolder != NULL )
6244 /* If the holder of the mutex has a priority below the priority of
6245 * the task attempting to obtain the mutex then it will temporarily
6246 * inherit the priority of the task attempting to obtain the mutex. */
if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
{
6249 /* Adjust the mutex holder state to account for its new
6250 * priority. Only reset the event list item value if the value is
6251 * not being used for anything else. */
6252 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
6254 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
6258 mtCOVERAGE_TEST_MARKER();
6261 /* If the task being modified is in the ready state it will need
6262 * to be moved into a new list. */
6263 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
6265 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
6267 /* It is known that the task is in its ready list so
6268 * there is no need to check again and the port level
6269 * reset macro can be called directly. */
6270 portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
6274 mtCOVERAGE_TEST_MARKER();
6277 /* Inherit the priority before being moved into the new list. */
6278 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
6279 prvAddTaskToReadyList( pxMutexHolderTCB );
6280 #if ( configNUMBER_OF_CORES > 1 )
6282 /* The priority of the task is raised. Yield for this task
6283 * if it is not running. */
6284 if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE )
6286 prvYieldForTask( pxMutexHolderTCB );
6289 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
6293 /* Just inherit the priority. */
6294 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
6297 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
/* Inheritance occurred. */
xReturn = pdTRUE;
}
else
{
6304 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
6306 /* The base priority of the mutex holder is lower than the
6307 * priority of the task attempting to take the mutex, but the
6308 * current priority of the mutex holder is not lower than the
6309 * priority of the task attempting to take the mutex.
6310 * Therefore the mutex holder must have already inherited a
6311 * priority, but inheritance would have occurred if that had
 * not been the case. */
xReturn = pdTRUE;
}
else
{
6317 mtCOVERAGE_TEST_MARKER();
6323 mtCOVERAGE_TEST_MARKER();
traceRETURN_xTaskPriorityInherit( xReturn );

return xReturn;
}
6331 #endif /* configUSE_MUTEXES */
6332 /*-----------------------------------------------------------*/
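/* Illustrative note - not part of the kernel. xTaskPriorityInherit() is
 * called from the queue implementation, never directly by application code.
 * An application only observes its effect by using a mutex type semaphore:
 *
 * SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 * // If a low priority task holds xMutex when a high priority task calls
 * // xSemaphoreTake( xMutex, portMAX_DELAY ), the holder temporarily runs
 * // at the higher priority until it gives the mutex back.
 */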
6334 #if ( configUSE_MUTEXES == 1 )
6336 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
6338 TCB_t * const pxTCB = pxMutexHolder;
6339 BaseType_t xReturn = pdFALSE;
6341 traceENTER_xTaskPriorityDisinherit( pxMutexHolder );
6343 if( pxMutexHolder != NULL )
6345 /* A task can only have an inherited priority if it holds the mutex.
6346 * If the mutex is held by a task then it cannot be given from an
6347 * interrupt, and if a mutex is given by the holding task then it must
6348 * be the running state task. */
6349 configASSERT( pxTCB == pxCurrentTCB );
6350 configASSERT( pxTCB->uxMutexesHeld );
6351 ( pxTCB->uxMutexesHeld )--;
/* Has the holder of the mutex inherited the priority of another
 * task? */
6355 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
6357 /* Only disinherit if no other mutexes are held. */
6358 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
6360 /* A task can only have an inherited priority if it holds
6361 * the mutex. If the mutex is held by a task then it cannot be
6362 * given from an interrupt, and if a mutex is given by the
6363 * holding task then it must be the running state task. Remove
6364 * the holding task from the ready list. */
6365 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
6367 portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
6371 mtCOVERAGE_TEST_MARKER();
6374 /* Disinherit the priority before adding the task into the
6375 * new ready list. */
6376 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
6377 pxTCB->uxPriority = pxTCB->uxBasePriority;
6379 /* Reset the event list item value. It cannot be in use for
6380 * any other purpose if this task is running, and it must be
6381 * running to give back the mutex. */
6382 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
6383 prvAddTaskToReadyList( pxTCB );
6384 #if ( configNUMBER_OF_CORES > 1 )
6386 /* The priority of the task is dropped. Yield the core on
6387 * which the task is running. */
6388 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
6390 prvYieldCore( pxTCB->xTaskRunState );
6393 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
6395 /* Return true to indicate that a context switch is required.
6396 * This is only actually required in the corner case whereby
6397 * multiple mutexes were held and the mutexes were given back
6398 * in an order different to that in which they were taken.
6399 * If a context switch did not occur when the first mutex was
6400 * returned, even if a task was waiting on it, then a context
6401 * switch should occur when the last mutex is returned whether
 * a task is waiting on it or not. */
xReturn = pdTRUE;
}
else
{
6407 mtCOVERAGE_TEST_MARKER();
6412 mtCOVERAGE_TEST_MARKER();
6417 mtCOVERAGE_TEST_MARKER();
traceRETURN_xTaskPriorityDisinherit( xReturn );

return xReturn;
}
6425 #endif /* configUSE_MUTEXES */
6426 /*-----------------------------------------------------------*/
6428 #if ( configUSE_MUTEXES == 1 )
6430 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
6431 UBaseType_t uxHighestPriorityWaitingTask )
6433 TCB_t * const pxTCB = pxMutexHolder;
6434 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
6435 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
6437 traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, uxHighestPriorityWaitingTask );
6439 if( pxMutexHolder != NULL )
/* If pxMutexHolder is not NULL then the holder must hold at least
 * one mutex. */
6443 configASSERT( pxTCB->uxMutexesHeld );
6445 /* Determine the priority to which the priority of the task that
6446 * holds the mutex should be set. This will be the greater of the
6447 * holding task's base priority and the priority of the highest
6448 * priority task that is waiting to obtain the mutex. */
6449 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
6451 uxPriorityToUse = uxHighestPriorityWaitingTask;
6455 uxPriorityToUse = pxTCB->uxBasePriority;
6458 /* Does the priority need to change? */
6459 if( pxTCB->uxPriority != uxPriorityToUse )
6461 /* Only disinherit if no other mutexes are held. This is a
6462 * simplification in the priority inheritance implementation. If
6463 * the task that holds the mutex is also holding other mutexes then
6464 * the other mutexes may have caused the priority inheritance. */
6465 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
/* If a task has timed out because it already holds the
 * mutex it was trying to obtain then it cannot have inherited
 * its own priority. */
6470 configASSERT( pxTCB != pxCurrentTCB );
6472 /* Disinherit the priority, remembering the previous
 * priority to facilitate determining the subject task's
 * state. */
6475 traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
6476 uxPriorityUsedOnEntry = pxTCB->uxPriority;
6477 pxTCB->uxPriority = uxPriorityToUse;
6479 /* Only reset the event list item value if the value is not
6480 * being used for anything else. */
6481 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
6483 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
6487 mtCOVERAGE_TEST_MARKER();
6490 /* If the running task is not the task that holds the mutex
6491 * then the task that holds the mutex could be in either the
6492 * Ready, Blocked or Suspended states. Only remove the task
6493 * from its current state list if it is in the Ready state as
6494 * the task's priority is going to change and there is one
6495 * Ready list per priority. */
6496 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
6498 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
6500 /* It is known that the task is in its ready list so
6501 * there is no need to check again and the port level
6502 * reset macro can be called directly. */
6503 portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
6507 mtCOVERAGE_TEST_MARKER();
6510 prvAddTaskToReadyList( pxTCB );
6511 #if ( configNUMBER_OF_CORES > 1 )
6513 /* The priority of the task is dropped. Yield the core on
6514 * which the task is running. */
6515 if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE )
6517 prvYieldCore( pxTCB->xTaskRunState );
6520 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
6524 mtCOVERAGE_TEST_MARKER();
6529 mtCOVERAGE_TEST_MARKER();
6534 mtCOVERAGE_TEST_MARKER();
6539 mtCOVERAGE_TEST_MARKER();
6542 traceRETURN_vTaskPriorityDisinheritAfterTimeout();
6545 #endif /* configUSE_MUTEXES */
6546 /*-----------------------------------------------------------*/
6548 #if ( configNUMBER_OF_CORES > 1 )
6550 /* If not in a critical section then yield immediately.
6551 * Otherwise set xYieldPendings to true to wait to
 * yield until exiting the critical section. */
6554 void vTaskYieldWithinAPI( void )
6556 traceENTER_vTaskYieldWithinAPI();
if( portGET_CRITICAL_NESTING_COUNT() == 0U )
{
    portYIELD();
}
else
{
    xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
}
6567 traceRETURN_vTaskYieldWithinAPI();
6569 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6571 /*-----------------------------------------------------------*/
6573 #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
6575 void vTaskEnterCritical( void )
6577 traceENTER_vTaskEnterCritical();
6579 portDISABLE_INTERRUPTS();
6581 if( xSchedulerRunning != pdFALSE )
6583 ( pxCurrentTCB->uxCriticalNesting )++;
6585 /* This is not the interrupt safe version of the enter critical
6586 * function so assert() if it is being called from an interrupt
6587 * context. Only API functions that end in "FromISR" can be used in an
6588 * interrupt. Only assert if the critical nesting count is 1 to
6589 * protect against recursive calls if the assert function also uses a
6590 * critical section. */
6591 if( pxCurrentTCB->uxCriticalNesting == 1 )
6593 portASSERT_IF_IN_ISR();
6598 mtCOVERAGE_TEST_MARKER();
6601 traceRETURN_vTaskEnterCritical();
6604 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
6605 /*-----------------------------------------------------------*/
6607 #if ( configNUMBER_OF_CORES > 1 )
6609 void vTaskEnterCritical( void )
6611 traceENTER_vTaskEnterCritical();
6613 portDISABLE_INTERRUPTS();
6615 if( xSchedulerRunning != pdFALSE )
if( portGET_CRITICAL_NESTING_COUNT() == 0U )
{
    portGET_TASK_LOCK();
    portGET_ISR_LOCK();
}
6623 portINCREMENT_CRITICAL_NESTING_COUNT();
6625 /* This is not the interrupt safe version of the enter critical
6626 * function so assert() if it is being called from an interrupt
6627 * context. Only API functions that end in "FromISR" can be used in an
6628 * interrupt. Only assert if the critical nesting count is 1 to
6629 * protect against recursive calls if the assert function also uses a
6630 * critical section. */
6631 if( portGET_CRITICAL_NESTING_COUNT() == 1U )
6633 portASSERT_IF_IN_ISR();
6635 if( uxSchedulerSuspended == 0U )
6637 /* The only time there would be a problem is if this is called
6638 * before a context switch and vTaskExitCritical() is called
6639 * after pxCurrentTCB changes. Therefore this should not be
6640 * used within vTaskSwitchContext(). */
6641 prvCheckForRunStateChange();
6647 mtCOVERAGE_TEST_MARKER();
6650 traceRETURN_vTaskEnterCritical();
6653 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6655 /*-----------------------------------------------------------*/
6657 #if ( configNUMBER_OF_CORES > 1 )
6659 UBaseType_t vTaskEnterCriticalFromISR( void )
6661 UBaseType_t uxSavedInterruptStatus = 0;
6663 traceENTER_vTaskEnterCriticalFromISR();
6665 if( xSchedulerRunning != pdFALSE )
6667 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
if( portGET_CRITICAL_NESTING_COUNT() == 0U )
{
    portGET_ISR_LOCK();
}
6674 portINCREMENT_CRITICAL_NESTING_COUNT();
6678 mtCOVERAGE_TEST_MARKER();
6681 traceRETURN_vTaskEnterCriticalFromISR( uxSavedInterruptStatus );
6683 return uxSavedInterruptStatus;
6686 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6687 /*-----------------------------------------------------------*/
6689 #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) )
6691 void vTaskExitCritical( void )
6693 traceENTER_vTaskExitCritical();
6695 if( xSchedulerRunning != pdFALSE )
6697 /* If pxCurrentTCB->uxCriticalNesting is zero then this function
6698 * does not match a previous call to vTaskEnterCritical(). */
6699 configASSERT( pxCurrentTCB->uxCriticalNesting > 0U );
/* This function should not be called from an ISR. Use
 * vTaskExitCriticalFromISR() to exit a critical section from an ISR. */
6703 portASSERT_IF_IN_ISR();
6705 if( pxCurrentTCB->uxCriticalNesting > 0U )
6707 ( pxCurrentTCB->uxCriticalNesting )--;
6709 if( pxCurrentTCB->uxCriticalNesting == 0U )
6711 portENABLE_INTERRUPTS();
6715 mtCOVERAGE_TEST_MARKER();
6720 mtCOVERAGE_TEST_MARKER();
6725 mtCOVERAGE_TEST_MARKER();
6728 traceRETURN_vTaskExitCritical();
6731 #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */
6732 /*-----------------------------------------------------------*/
6734 #if ( configNUMBER_OF_CORES > 1 )
6736 void vTaskExitCritical( void )
6738 traceENTER_vTaskExitCritical();
6740 if( xSchedulerRunning != pdFALSE )
6742 /* If critical nesting count is zero then this function
6743 * does not match a previous call to vTaskEnterCritical(). */
6744 configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );
/* This function should not be called from an ISR. Use
 * vTaskExitCriticalFromISR() to exit a critical section from an ISR. */
6748 portASSERT_IF_IN_ISR();
6750 if( portGET_CRITICAL_NESTING_COUNT() > 0U )
6752 portDECREMENT_CRITICAL_NESTING_COUNT();
6754 if( portGET_CRITICAL_NESTING_COUNT() == 0U )
6756 BaseType_t xYieldCurrentTask;
/* Get the xYieldPending status inside the critical section. */
6759 xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ];
6761 portRELEASE_ISR_LOCK();
6762 portRELEASE_TASK_LOCK();
6763 portENABLE_INTERRUPTS();
/* When a task yields in a critical section it just sets
 * xYieldPending to true. So now that we have exited the
 * critical section check if xYieldPending is true, and
 * if so, yield. */
if( xYieldCurrentTask != pdFALSE )
{
    portYIELD();
}
6776 mtCOVERAGE_TEST_MARKER();
6781 mtCOVERAGE_TEST_MARKER();
6786 mtCOVERAGE_TEST_MARKER();
6789 traceRETURN_vTaskExitCritical();
6792 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6793 /*-----------------------------------------------------------*/
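/* Illustrative usage only - not part of the kernel. Application code enters
 * and exits critical sections through the taskENTER_CRITICAL() and
 * taskEXIT_CRITICAL() macros, which map onto the functions above.
 * ulSharedCounter is a hypothetical shared variable; keep such sections
 * very short.
 *
 * taskENTER_CRITICAL();
 * {
 *     ulSharedCounter++;
 * }
 * taskEXIT_CRITICAL();
 */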
6795 #if ( configNUMBER_OF_CORES > 1 )
6797 void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus )
6799 traceENTER_vTaskExitCriticalFromISR( uxSavedInterruptStatus );
6801 if( xSchedulerRunning != pdFALSE )
/* If the critical nesting count is zero then this function
 * does not match a previous call to vTaskEnterCriticalFromISR(). */
6805 configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U );
6807 if( portGET_CRITICAL_NESTING_COUNT() > 0U )
6809 portDECREMENT_CRITICAL_NESTING_COUNT();
6811 if( portGET_CRITICAL_NESTING_COUNT() == 0U )
6813 portRELEASE_ISR_LOCK();
6814 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
6818 mtCOVERAGE_TEST_MARKER();
6823 mtCOVERAGE_TEST_MARKER();
6828 mtCOVERAGE_TEST_MARKER();
6831 traceRETURN_vTaskExitCriticalFromISR();
6834 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
6835 /*-----------------------------------------------------------*/
6837 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
6839 static char * prvWriteNameToBuffer( char * pcBuffer,
const char * pcTaskName )
{
    size_t x;
6844 /* Start by copying the entire string. */
6845 ( void ) strcpy( pcBuffer, pcTaskName );
/* Pad the end of the string with spaces to ensure columns line up when
 * printed out. */
6849 for( x = strlen( pcBuffer ); x < ( size_t ) ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ); x++ )
6851 pcBuffer[ x ] = ' ';
6855 pcBuffer[ x ] = ( char ) 0x00;
6857 /* Return the new end of string. */
6858 return &( pcBuffer[ x ] );
6861 #endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
6862 /*-----------------------------------------------------------*/
6864 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
6866 void vTaskListTasks( char * pcWriteBuffer,
6867 size_t uxBufferLength )
6869 TaskStatus_t * pxTaskStatusArray;
6870 size_t uxConsumedBufferLength = 0;
6871 size_t uxCharsWrittenBySnprintf;
6872 int iSnprintfReturnValue;
6873 BaseType_t xOutputBufferFull = pdFALSE;
UBaseType_t uxArraySize, x;
char cStatus;
6877 traceENTER_vTaskListTasks( pcWriteBuffer, uxBufferLength );
/*
 * PLEASE NOTE:
 *
 * This function is provided for convenience only, and is used by many
 * of the demo applications. Do not consider it to be part of the
 * scheduler.
 *
 * vTaskListTasks() calls uxTaskGetSystemState(), then formats part of the
 * uxTaskGetSystemState() output into a human readable table that
 * displays task names, states, priorities, stack usage and task numbers.
 * Stack usage is given as the number of unused StackType_t words the
 * stack can still hold - not as a number of bytes.
 *
6892 * vTaskListTasks() has a dependency on the snprintf() C library function that
6893 * might bloat the code size, use a lot of stack, and provide different
6894 * results on different platforms. An alternative, tiny, third party,
6895 * and limited functionality implementation of snprintf() is provided in
6896 * many of the FreeRTOS/Demo sub-directories in a file called
6897 * printf-stdarg.c (note printf-stdarg.c does not provide a full
6898 * snprintf() implementation!).
6900 * It is recommended that production systems call uxTaskGetSystemState()
6901 * directly to get access to raw stats data, rather than indirectly
 * through a call to vTaskListTasks().
 */
6906 /* Make sure the write buffer does not contain a string. */
6907 *pcWriteBuffer = ( char ) 0x00;
6909 /* Take a snapshot of the number of tasks in case it changes while this
6910 * function is executing. */
6911 uxArraySize = uxCurrentNumberOfTasks;
6913 /* Allocate an array index for each task. NOTE! if
6914 * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
6915 * equate to NULL. */
6916 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
6918 if( pxTaskStatusArray != NULL )
6920 /* Generate the (binary) data. */
6921 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
6923 /* Create a human readable table from the binary data. */
6924 for( x = 0; ( x < uxArraySize ) && ( xOutputBufferFull == pdFALSE ); x++ )
switch( pxTaskStatusArray[ x ].eCurrentState )
{
    case eRunning:
        cStatus = tskRUNNING_CHAR;
        break;

    case eReady:
        cStatus = tskREADY_CHAR;
        break;

    case eBlocked:
        cStatus = tskBLOCKED_CHAR;
        break;

    case eSuspended:
        cStatus = tskSUSPENDED_CHAR;
        break;

    case eDeleted:
        cStatus = tskDELETED_CHAR;
        break;

    case eInvalid: /* Fall through. */
    default:       /* Should not get here, but it is included
                    * to prevent static checking errors. */
        cStatus = ( char ) 0x00;
        break;
}
6955 /* Is there enough space in the buffer to hold task name? */
6956 if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
6958 /* Write the task name to the string, padding with spaces so it
6959 * can be printed in tabular form more easily. */
6960 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
6961 /* Do not count the terminating null character. */
6962 uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1 );
6964 /* Is there space left in the buffer? -1 is done because snprintf
6965 * writes a terminating null character. So we are essentially
 * checking if the buffer has space to write at least one non-null
 * character. */
6968 if( uxConsumedBufferLength < ( uxBufferLength - 1 ) )
6970 /* Write the rest of the string. */
6971 iSnprintfReturnValue = snprintf( pcWriteBuffer,
6972 uxBufferLength - uxConsumedBufferLength,
6973 "\t%c\t%u\t%u\t%u\r\n",
6975 ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
6976 ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
6977 ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
6978 uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );
6980 uxConsumedBufferLength += uxCharsWrittenBySnprintf;
6981 pcWriteBuffer += uxCharsWrittenBySnprintf; /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
6985 xOutputBufferFull = pdTRUE;
6990 xOutputBufferFull = pdTRUE;
6994 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
6995 * is 0 then vPortFree() will be #defined to nothing. */
6996 vPortFree( pxTaskStatusArray );
7000 mtCOVERAGE_TEST_MARKER();
7003 traceRETURN_vTaskListTasks();
7006 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
/*-----------------------------------------------------------*/
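/* Illustrative usage only - not part of the kernel. As recommended above,
 * production code can call uxTaskGetSystemState() directly for the raw
 * data rather than going through the formatting functions.
 *
 * UBaseType_t uxCount = uxTaskGetNumberOfTasks();
 * TaskStatus_t * pxStatus = pvPortMalloc( uxCount * sizeof( TaskStatus_t ) );
 *
 * if( pxStatus != NULL )
 * {
 *     configRUN_TIME_COUNTER_TYPE ulTotalRunTime;
 *
 *     uxCount = uxTaskGetSystemState( pxStatus, uxCount, &ulTotalRunTime );
 *     // ... consume the raw TaskStatus_t records here ...
 *     vPortFree( pxStatus );
 * }
 */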
7009 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )
7011 void vTaskGetRunTimeStatistics( char * pcWriteBuffer,
7012 size_t uxBufferLength )
7014 TaskStatus_t * pxTaskStatusArray;
7015 size_t uxConsumedBufferLength = 0;
7016 size_t uxCharsWrittenBySnprintf;
7017 int iSnprintfReturnValue;
7018 BaseType_t xOutputBufferFull = pdFALSE;
7019 UBaseType_t uxArraySize, x;
7020 configRUN_TIME_COUNTER_TYPE ulTotalTime, ulStatsAsPercentage;
7022 traceENTER_vTaskGetRunTimeStatistics( pcWriteBuffer, uxBufferLength );
/*
 * PLEASE NOTE:
 *
 * This function is provided for convenience only, and is used by many
 * of the demo applications. Do not consider it to be part of the
 * scheduler.
 *
7031 * vTaskGetRunTimeStatistics() calls uxTaskGetSystemState(), then formats part
7032 * of the uxTaskGetSystemState() output into a human readable table that
7033 * displays the amount of time each task has spent in the Running state
7034 * in both absolute and percentage terms.
7036 * vTaskGetRunTimeStatistics() has a dependency on the snprintf() C library
7037 * function that might bloat the code size, use a lot of stack, and
7038 * provide different results on different platforms. An alternative,
7039 * tiny, third party, and limited functionality implementation of
7040 * snprintf() is provided in many of the FreeRTOS/Demo sub-directories in
7041 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
7042 * a full snprintf() implementation!).
7044 * It is recommended that production systems call uxTaskGetSystemState()
7045 * directly to get access to raw stats data, rather than indirectly
 * through a call to vTaskGetRunTimeStatistics().
 */
7049 /* Make sure the write buffer does not contain a string. */
7050 *pcWriteBuffer = ( char ) 0x00;
7052 /* Take a snapshot of the number of tasks in case it changes while this
7053 * function is executing. */
7054 uxArraySize = uxCurrentNumberOfTasks;
7056 /* Allocate an array index for each task. NOTE! If
7057 * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
7058 * equate to NULL. */
7059 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
7061 if( pxTaskStatusArray != NULL )
7063 /* Generate the (binary) data. */
7064 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
7066 /* For percentage calculations. */
7067 ulTotalTime /= 100UL;
7069 /* Avoid divide by zero errors. */
7070 if( ulTotalTime > 0UL )
7072 /* Create a human readable table from the binary data. */
7073 for( x = 0; ( x < uxArraySize ) && ( xOutputBufferFull == pdFALSE ); x++ )
7075 /* What percentage of the total run time has the task used?
7076 * This will always be rounded down to the nearest integer.
7077 * ulTotalRunTime has already been divided by 100. */
7078 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
7080 /* Is there enough space in the buffer to hold task name? */
7081 if( ( uxConsumedBufferLength + configMAX_TASK_NAME_LEN ) <= uxBufferLength )
7083 /* Write the task name to the string, padding with
 * spaces so it can be printed in tabular form more
 * easily. */
7086 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
7087 /* Do not count the terminating null character. */
7088 uxConsumedBufferLength = uxConsumedBufferLength + ( configMAX_TASK_NAME_LEN - 1 );
7090 /* Is there space left in the buffer? -1 is done because snprintf
7091 * writes a terminating null character. So we are essentially
 * checking if the buffer has space to write at least one non-null
 * character. */
7094 if( uxConsumedBufferLength < ( uxBufferLength - 1 ) )
7096 if( ulStatsAsPercentage > 0UL )
7098 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
7100 iSnprintfReturnValue = snprintf( pcWriteBuffer,
7101 uxBufferLength - uxConsumedBufferLength,
7102 "\t%lu\t\t%lu%%\r\n",
7103 pxTaskStatusArray[ x ].ulRunTimeCounter,
7104 ulStatsAsPercentage );
7108 /* sizeof( int ) == sizeof( long ) so a smaller
7109 * printf() library can be used. */
7110 iSnprintfReturnValue = snprintf( pcWriteBuffer,
uxBufferLength - uxConsumedBufferLength,
"\t%u\t\t%u%%\r\n",
7113 ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
7114 ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
7116 #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
7120 /* If the percentage is zero here then the task has
7121 * consumed less than 1% of the total run time. */
7122 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
7124 iSnprintfReturnValue = snprintf( pcWriteBuffer,
7125 uxBufferLength - uxConsumedBufferLength,
7126 "\t%lu\t\t<1%%\r\n",
7127 pxTaskStatusArray[ x ].ulRunTimeCounter );
7131 /* sizeof( int ) == sizeof( long ) so a smaller
7132 * printf() library can be used. */
7133 iSnprintfReturnValue = snprintf( pcWriteBuffer,
uxBufferLength - uxConsumedBufferLength,
"\t%u\t\t<1%%\r\n",
7136 ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
7138 #endif /* ifdef portLU_PRINTF_SPECIFIER_REQUIRED */
7141 uxCharsWrittenBySnprintf = prvSnprintfReturnValueToCharsWritten( iSnprintfReturnValue, uxBufferLength - uxConsumedBufferLength );
7142 uxConsumedBufferLength += uxCharsWrittenBySnprintf;
7143 pcWriteBuffer += uxCharsWrittenBySnprintf; /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
7147 xOutputBufferFull = pdTRUE;
7152 xOutputBufferFull = pdTRUE;
7158 mtCOVERAGE_TEST_MARKER();
7161 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
7162 * is 0 then vPortFree() will be #defined to nothing. */
7163 vPortFree( pxTaskStatusArray );
7167 mtCOVERAGE_TEST_MARKER();
7170 traceRETURN_vTaskGetRunTimeStatistics();
#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */
7174 /*-----------------------------------------------------------*/
7176 TickType_t uxTaskResetEventItemValue( void )
7178 TickType_t uxReturn;
7180 traceENTER_uxTaskResetEventItemValue();
7182 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
7184 /* Reset the event list item to its normal value - so it can be used with
7185 * queues and semaphores. */
7186 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
traceRETURN_uxTaskResetEventItemValue( uxReturn );

return uxReturn;
}
7192 /*-----------------------------------------------------------*/
7194 #if ( configUSE_MUTEXES == 1 )
TaskHandle_t pvTaskIncrementMutexHeldCount( void )
{
    TCB_t * pxTCB;
7200 traceENTER_pvTaskIncrementMutexHeldCount();
7202 pxTCB = pxCurrentTCB;
7204 /* If xSemaphoreCreateMutex() is called before any tasks have been created
7205 * then pxCurrentTCB will be NULL. */
if( pxTCB != NULL )
{
    ( pxTCB->uxMutexesHeld )++;
}
traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB );

return pxTCB;
}
7216 #endif /* configUSE_MUTEXES */
7217 /*-----------------------------------------------------------*/
7219 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
7221 uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
7222 BaseType_t xClearCountOnExit,
TickType_t xTicksToWait )
{
    uint32_t ulReturn;
7227 traceENTER_ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait );
7229 configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
7231 taskENTER_CRITICAL();
7233 /* Only block if the notification count is not already non-zero. */
7234 if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
7236 /* Mark this task as waiting for a notification. */
7237 pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
7239 if( xTicksToWait > ( TickType_t ) 0 )
7241 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
7242 traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
7244 /* All ports are written to allow a yield in a critical
7245 * section (some will yield immediately, others wait until the
7246 * critical section exits) - but it is not something that
7247 * application code should ever do. */
7248 taskYIELD_WITHIN_API();
7252 mtCOVERAGE_TEST_MARKER();
7257 mtCOVERAGE_TEST_MARKER();
7260 taskEXIT_CRITICAL();
7262 taskENTER_CRITICAL();
7264 traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );
7265 ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
7267 if( ulReturn != 0UL )
7269 if( xClearCountOnExit != pdFALSE )
7271 pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = 0UL;
7275 pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1;
7280 mtCOVERAGE_TEST_MARKER();
7283 pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
7285 taskEXIT_CRITICAL();
traceRETURN_ulTaskGenericNotifyTake( ulReturn );

return ulReturn;
}
7292 #endif /* configUSE_TASK_NOTIFICATIONS */
7293 /*-----------------------------------------------------------*/
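/* Illustrative usage only - not part of the kernel. The take side of a
 * notification used as a lightweight binary semaphore between an interrupt
 * and a hypothetical deferred handling task.
 *
 * void vDeferredHandlingTask( void * pvParameters )
 * {
 *     for( ;; )
 *     {
 *         // Block until notified, clearing the notification count on exit.
 *         if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
 *         {
 *             // Perform the deferred interrupt processing here.
 *         }
 *     }
 * }
 */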
7295 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
7297 BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
7298 uint32_t ulBitsToClearOnEntry,
7299 uint32_t ulBitsToClearOnExit,
7300 uint32_t * pulNotificationValue,
TickType_t xTicksToWait )
{
    BaseType_t xReturn;
7305 traceENTER_xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait );
7307 configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
7309 taskENTER_CRITICAL();
7311 /* Only block if a notification is not already pending. */
7312 if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
7314 /* Clear bits in the task's notification value as bits may get
7315 * set by the notifying task or interrupt. This can be used to
7316 * clear the value to zero. */
7317 pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
7319 /* Mark this task as waiting for a notification. */
7320 pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
7322 if( xTicksToWait > ( TickType_t ) 0 )
7324 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
7325 traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
7327 /* All ports are written to allow a yield in a critical
7328 * section (some will yield immediately, others wait until the
7329 * critical section exits) - but it is not something that
7330 * application code should ever do. */
7331 taskYIELD_WITHIN_API();
7335 mtCOVERAGE_TEST_MARKER();
7340 mtCOVERAGE_TEST_MARKER();
7343 taskEXIT_CRITICAL();
7345 taskENTER_CRITICAL();
7347 traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );
7349 if( pulNotificationValue != NULL )
/* Output the current notification value, which may or may not
 * have changed. */
7353 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
7356 /* If ucNotifyValue is set then either the task never entered the
7357 * blocked state (because a notification was already pending) or the
7358 * task unblocked because of a notification. Otherwise the task
7359 * unblocked because of a timeout. */
7360 if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
/* A notification was not received. */
xReturn = pdFALSE;
}
else
{
7367 /* A notification was already pending or a notification was
7368 * received while the task was waiting. */
pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit;
xReturn = pdTRUE;
}
7373 pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
7375 taskEXIT_CRITICAL();
traceRETURN_xTaskGenericNotifyWait( xReturn );

return xReturn;
}
7382 #endif /* configUSE_TASK_NOTIFICATIONS */
7383 /*-----------------------------------------------------------*/
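/* Illustrative usage only - not part of the kernel. Waiting for event bits
 * delivered through the task's notification value; the 0x01 "RX complete"
 * bit is hypothetical.
 *
 * uint32_t ulNotifiedValue;
 *
 * if( xTaskNotifyWait( 0x00UL,          // Don't clear bits on entry.
 *                      0xFFFFFFFFUL,    // Clear all bits on exit.
 *                      &ulNotifiedValue,
 *                      portMAX_DELAY ) == pdTRUE )
 * {
 *     if( ( ulNotifiedValue & 0x01UL ) != 0 )
 *     {
 *         // Handle the hypothetical "RX complete" event.
 *     }
 * }
 */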
7385 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
7387 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
UBaseType_t uxIndexToNotify,
uint32_t ulValue,
7390 eNotifyAction eAction,
uint32_t * pulPreviousNotificationValue )
{
    TCB_t * pxTCB;
7394 BaseType_t xReturn = pdPASS;
7395 uint8_t ucOriginalNotifyState;
7397 traceENTER_xTaskGenericNotify( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue );
7399 configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
7400 configASSERT( xTaskToNotify );
7401 pxTCB = xTaskToNotify;
7403 taskENTER_CRITICAL();
7405 if( pulPreviousNotificationValue != NULL )
7407 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
7410 ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
7412 pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
switch( eAction )
{
    case eSetBits:
        pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
        break;

    case eIncrement:
        ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
        break;

    case eSetValueWithOverwrite:
        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
        break;

    case eSetValueWithoutOverwrite:

        if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
        {
            pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
        }
        else
        {
            /* The value could not be written to the task. */
            xReturn = pdFAIL;
        }

        break;

    case eNoAction:

        /* The task is being notified without its notify value being
         * updated. */
        break;

    default:

        /* Should not get here if all enums are handled.
         * Artificially force an assert by testing a value the
         * compiler can't assume is const. */
        configASSERT( xTickCount == ( TickType_t ) 0 );
        break;
}
7458 traceTASK_NOTIFY( uxIndexToNotify );
7460 /* If the task is in the blocked state specifically to wait for a
7461 * notification then unblock it now. */
7462 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
7464 listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
7465 prvAddTaskToReadyList( pxTCB );
7467 /* The task should not have been on an event list. */
7468 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
7470 #if ( configUSE_TICKLESS_IDLE != 0 )
7472 /* If a task is blocked waiting for a notification then
7473 * xNextTaskUnblockTime might be set to the blocked task's time
7474 * out time. If the task is unblocked for a reason other than
7475 * a timeout xNextTaskUnblockTime is normally left unchanged,
7476 * because it will automatically get reset to a new value when
7477 * the tick count equals xNextTaskUnblockTime. However if
7478 * tickless idling is used it might be more important to enter
7479 * sleep mode at the earliest possible time - so reset
7480 * xNextTaskUnblockTime here to ensure it is updated at the
7481 * earliest possible time. */
7482 prvResetNextTaskUnblockTime();
7486 /* Check if the notified task has a priority above the currently
7487 * executing task. */
7488 taskYIELD_ANY_CORE_IF_USING_PREEMPTION( pxTCB );
7492 mtCOVERAGE_TEST_MARKER();
7495 taskEXIT_CRITICAL();
7497 traceRETURN_xTaskGenericNotify( xReturn );
7502 #endif /* configUSE_TASK_NOTIFICATIONS */
7503 /*-----------------------------------------------------------*/
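
/* Illustrative usage sketch (not part of the kernel): setting an event bit in
 * another task's default (index 0) notification value with the xTaskNotify()
 * API macro. The handle name and bit mask are hypothetical.
 *
 *  extern TaskHandle_t xHandlerTaskHandle;
 *
 *  void vSignalRxEvent( void )
 *  {
 *      // OR bit 0 into the handler task's notification value and unblock it
 *      // if it is waiting. eSetBits cannot fail, so the return value is
 *      // always pdPASS here.
 *      ( void ) xTaskNotify( xHandlerTaskHandle, 0x01UL, eSetBits );
 *  }
 */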
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                          UBaseType_t uxIndexToNotify,
                                          uint32_t ulValue,
                                          eNotifyAction eAction,
                                          uint32_t * pulPreviousNotificationValue,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_xTaskGenericNotifyFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions. If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority. Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority. FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible. More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter to an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_xTaskGenericNotifyFromISR( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
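
/* Illustrative usage sketch (not part of the kernel): notifying a task from
 * an interrupt handler, then requesting a context switch on interrupt exit if
 * the notification woke a higher priority task. The handler and handle names
 * are hypothetical.
 *
 *  extern TaskHandle_t xHandlerTaskHandle;
 *
 *  void vExampleInterruptHandler( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      // Set event bit 1 in the handler task's default notification value.
 *      ( void ) xTaskNotifyFromISR( xHandlerTaskHandle, 0x02UL, eSetBits, &xHigherPriorityTaskWoken );
 *
 *      // Perform a context switch before the interrupt returns, if required.
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */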
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                        UBaseType_t uxIndexToNotify,
                                        BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        UBaseType_t uxSavedInterruptStatus;

        traceENTER_vTaskGenericNotifyGiveFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken );

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions. If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority. Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority. FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible. More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
        {
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            /* 'Giving' is equivalent to incrementing a count in a counting
             * semaphore. */
            ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

            traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                #if ( configNUMBER_OF_CORES == 1 )
                {
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* The notified task has a priority above the currently
                         * executing task so a yield is required. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }

                        /* Mark that a yield is pending in case the user is not
                         * using the "xHigherPriorityTaskWoken" parameter in an ISR
                         * safe FreeRTOS function. */
                        xYieldPendings[ 0 ] = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                {
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        prvYieldForTask( pxTCB );

                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
                        {
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

        traceRETURN_vTaskGenericNotifyGiveFromISR();
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
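
/* Illustrative usage sketch (not part of the kernel): the give/take pair used
 * as a lightweight binary-semaphore replacement. An ISR 'gives' to a
 * hypothetical deferred-handler task, which blocks in ulTaskNotifyTake().
 *
 *  extern TaskHandle_t xDeferredHandlerTask;
 *
 *  void vTransferCompleteISR( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      vTaskNotifyGiveFromISR( xDeferredHandlerTask, &xHigherPriorityTaskWoken );
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 *
 *  void vDeferredHandlerTask( void * pvParameters )
 *  {
 *      for( ; ; )
 *      {
 *          // pdTRUE clears the notification value on exit, giving binary
 *          // semaphore semantics; pdFALSE would decrement it instead,
 *          // giving counting semaphore semantics.
 *          if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
 *          {
 *              // Process the completed transfer here.
 *          }
 *      }
 *  }
 */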
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
                                             UBaseType_t uxIndexToClear )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn;

        traceENTER_xTaskGenericNotifyStateClear( xTask, uxIndexToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* If null is passed in here then it is the calling task that is having
         * its notification state cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
            {
                pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xTaskGenericNotifyStateClear( xReturn );

        return xReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
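
/* Illustrative usage sketch (not part of the kernel): cancelling any pending
 * notification on the calling task's default index, for example before
 * starting an operation whose completion will be signalled by a fresh
 * notification.
 *
 *  void vStartTransaction( void )
 *  {
 *      // Passing NULL targets the calling task; pdTRUE is returned only if
 *      // a notification was actually pending and has now been cleared.
 *      ( void ) xTaskNotifyStateClear( NULL );
 *  }
 */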
#if ( configUSE_TASK_NOTIFICATIONS == 1 )

    uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
                                            UBaseType_t uxIndexToClear,
                                            uint32_t ulBitsToClear )
    {
        TCB_t * pxTCB;
        uint32_t ulReturn;

        traceENTER_ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear );

        configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* If null is passed in here then it is the calling task that is having
         * its notification value cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            /* Return the notification value as it was before the bits were
             * cleared, then clear the bit mask. */
            ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
            pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
        }
        taskEXIT_CRITICAL();

        traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );

        return ulReturn;
    }

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
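
/* Illustrative usage sketch (not part of the kernel): atomically reading and
 * clearing the low byte of the calling task's default notification value.
 *
 *  void vConsumeEventFlags( void )
 *  {
 *      uint32_t ulPreviousValue;
 *
 *      // NULL targets the calling task; the value before clearing is
 *      // returned, so ulPreviousValue still holds the bits just cleared.
 *      ulPreviousValue = ulTaskNotifyValueClear( NULL, 0xFFUL );
 *  }
 */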
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
    {
        TCB_t * pxTCB;

        traceENTER_ulTaskGetRunTimeCounter( xTask );

        pxTCB = prvGetTCBFromHandle( xTask );

        traceRETURN_ulTaskGetRunTimeCounter( pxTCB->ulRunTimeCounter );

        return pxTCB->ulRunTimeCounter;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
/*-----------------------------------------------------------*/
#if ( configGENERATE_RUN_TIME_STATS == 1 )

    configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;

        traceENTER_ulTaskGetRunTimePercent( xTask );

        ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();

        /* For percentage calculations. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        /* Avoid divide by zero errors. */
        if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            pxTCB = prvGetTCBFromHandle( xTask );
            ulReturn = pxTCB->ulRunTimeCounter / ulTotalTime;
        }
        else
        {
            ulReturn = 0;
        }

        traceRETURN_ulTaskGetRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
/*-----------------------------------------------------------*/
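
/* Illustrative usage sketch (not part of the kernel): sampling one task's
 * accumulated run time and its whole-number percentage of the total run time.
 * Assumes configGENERATE_RUN_TIME_STATS is 1 and that the hypothetical handle
 * below is valid.
 *
 *  extern TaskHandle_t xWorkerTaskHandle;
 *
 *  void vLogWorkerLoad( void )
 *  {
 *      configRUN_TIME_COUNTER_TYPE ulAbsolute, ulPercent;
 *
 *      ulAbsolute = ulTaskGetRunTimeCounter( xWorkerTaskHandle );
 *      ulPercent = ulTaskGetRunTimePercent( xWorkerTaskHandle );
 *
 *      // ulPercent is truncated to a whole percentage, so it reads as zero
 *      // for tasks that have consumed less than 1% of the total run time.
 *  }
 */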
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
    {
        configRUN_TIME_COUNTER_TYPE ulReturn = 0;
        BaseType_t i;

        traceENTER_ulTaskGetIdleRunTimeCounter();

        for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
        {
            ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter;
        }

        traceRETURN_ulTaskGetIdleRunTimeCounter( ulReturn );

        return ulReturn;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
    {
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
        BaseType_t i;

        traceENTER_ulTaskGetIdleRunTimePercent();

        ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES;

        /* For percentage calculations. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        /* Avoid divide by zero errors. */
        if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
            {
                ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter;
            }

            ulReturn = ulRunTimeCounter / ulTotalTime;
        }
        else
        {
            ulReturn = 0;
        }

        traceRETURN_ulTaskGetIdleRunTimePercent( ulReturn );

        return ulReturn;
    }

#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
/*-----------------------------------------------------------*/
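
/* Illustrative usage sketch (not part of the kernel): estimating overall CPU
 * load from the idle tasks' share of the total run time.
 *
 *  void vLogCpuLoad( void )
 *  {
 *      configRUN_TIME_COUNTER_TYPE ulIdlePercent, ulLoadPercent;
 *
 *      ulIdlePercent = ulTaskGetIdleRunTimePercent();
 *
 *      // Whatever the idle tasks did not consume was spent in application
 *      // tasks, the kernel, and interrupts.
 *      ulLoadPercent = ( configRUN_TIME_COUNTER_TYPE ) 100 - ulIdlePercent;
 *  }
 */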
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
                                            const BaseType_t xCanBlockIndefinitely )
{
    TickType_t xTimeToWake;
    const TickType_t xConstTickCount = xTickCount;

    #if ( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
         * reset to pdFALSE so it can be detected as having been set to pdTRUE
         * when the task leaves the Blocked state. */
        pxCurrentTCB->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
     * as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
         * check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
             * list to ensure it is not woken by a timing event. It will block
             * indefinitely. */
            listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
             * does not occur. This may overflow but this doesn't matter, the
             * kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed. Place this item in the overflow
                 * list. */
                traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();
                vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                 * is used. */
                traceMOVED_TASK_TO_DELAYED_LIST();
                vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                 * head of the list of blocked tasks then xNextTaskUnblockTime
                 * needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
         * does not occur. This may overflow but this doesn't matter, the kernel
         * will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST();

            /* Wake time has overflowed. Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            traceMOVED_TASK_TO_DELAYED_LIST();

            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
             * list of blocked tasks then xNextTaskUnblockTime needs to be updated
             * too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
        ( void ) xCanBlockIndefinitely;
    }
    #endif /* INCLUDE_vTaskSuspend */
}
/*-----------------------------------------------------------*/
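
/* Worked example (not part of the kernel) of the overflow test above,
 * assuming a 16-bit TickType_t for brevity:
 *
 *   xConstTickCount = 0xFFF0
 *   xTicksToWait    = 0x0020
 *   xTimeToWake     = 0xFFF0 + 0x0020 = 0x0010  (unsigned wrap-around)
 *
 * Because 0x0010 < 0xFFF0, the wake time is known to lie after the next tick
 * count overflow, so the item goes on pxOverflowDelayedTaskList; the two
 * delayed lists are swapped when the tick count itself wraps to zero. */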
#if ( portUSING_MPU_WRAPPERS == 1 )

    xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;

        traceENTER_xTaskGetMPUSettings( xTask );

        pxTCB = prvGetTCBFromHandle( xTask );

        traceRETURN_xTaskGetMPUSettings( &( pxTCB->xMPUSettings ) );

        return &( pxTCB->xMPUSettings );
    }

#endif /* portUSING_MPU_WRAPPERS */
/*-----------------------------------------------------------*/
/* Code below here allows additional code to be inserted into this source file,
 * especially where access to file scope functions and data is needed (for example
 * when performing module tests). */
#ifdef FREERTOS_MODULE_TEST
    #include "tasks_test_access_functions.h"
#endif

#if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )

    #include "freertos_tasks_c_additions.h"

    #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
    #endif

#endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
/*-----------------------------------------------------------*/
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )

/*
 * This is the kernel provided implementation of vApplicationGetIdleTaskMemory()
 * to provide the memory that is used by the Idle task. It is used when
 * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetIdleTaskMemory by setting
 * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
 */
    void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
                                        StackType_t ** ppxIdleTaskStackBuffer,
                                        uint32_t * pulIdleTaskStackSize )
    {
        static StaticTask_t xIdleTaskTCB;
        static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];

        *ppxIdleTaskTCBBuffer = &( xIdleTaskTCB );
        *ppxIdleTaskStackBuffer = &( uxIdleTaskStack[ 0 ] );
        *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
    }

#endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
/*-----------------------------------------------------------*/
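
/* Illustrative sketch (not part of the kernel): if the application sets
 * configKERNEL_PROVIDED_STATIC_MEMORY to 0 it must supply this function
 * itself, for example to give the Idle task a larger stack. The
 * configAPP_IDLE_STACK_SIZE constant below is hypothetical and would be
 * defined by the application.
 *
 *  void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
 *                                      StackType_t ** ppxIdleTaskStackBuffer,
 *                                      uint32_t * pulIdleTaskStackSize )
 *  {
 *      static StaticTask_t xIdleTaskTCB;
 *      static StackType_t uxIdleTaskStack[ configAPP_IDLE_STACK_SIZE ];
 *
 *      *ppxIdleTaskTCBBuffer = &( xIdleTaskTCB );
 *      *ppxIdleTaskStackBuffer = &( uxIdleTaskStack[ 0 ] );
 *      *pulIdleTaskStackSize = configAPP_IDLE_STACK_SIZE;
 *  }
 */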
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) )

/*
 * This is the kernel provided implementation of vApplicationGetTimerTaskMemory()
 * to provide the memory that is used by the Timer service task. It is used when
 * configKERNEL_PROVIDED_STATIC_MEMORY is set to 1. The application can provide
 * its own implementation of vApplicationGetTimerTaskMemory by setting
 * configKERNEL_PROVIDED_STATIC_MEMORY to 0 or leaving it undefined.
 */
    void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer,
                                         StackType_t ** ppxTimerTaskStackBuffer,
                                         uint32_t * pulTimerTaskStackSize )
    {
        static StaticTask_t xTimerTaskTCB;
        static StackType_t uxTimerTaskStack[ configTIMER_TASK_STACK_DEPTH ];

        *ppxTimerTaskTCBBuffer = &( xTimerTaskTCB );
        *ppxTimerTaskStackBuffer = &( uxTimerTaskStack[ 0 ] );
        *pulTimerTaskStackSize = configTIMER_TASK_STACK_DEPTH;
    }

#endif /* #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configKERNEL_PROVIDED_STATIC_MEMORY == 1 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) */
/*-----------------------------------------------------------*/