/*
 * FreeRTOS Kernel V10.3.1
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */
\r
/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "stack_macros.h"
\r
43 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
\r
44 because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
\r
45 for the header files above, but not in this file, in order to generate the
\r
46 correct privileged Vs unprivileged linkage and placement. */
\r
47 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
\r
/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
functions but without including stdio.h here. */
#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
	/* At the bottom of this file are two optional functions that can be used
	to generate human readable text from the raw data generated by the
	uxTaskGetSystemState() function.  Note the formatting functions are provided
	for convenience only, and are NOT considered part of the kernel. */
	#include <stdio.h>
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
\r
#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define taskYIELD_IF_USING_PREEMPTION()
#else
	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
\r
67 /* Values that can be assigned to the ucNotifyState member of the TCB. */
\r
68 #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
\r
69 #define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
\r
70 #define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
\r
/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE	( 0xa5U )
\r
/* Bits used to record how a task's stack and TCB were allocated. */
\r
79 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
\r
80 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
\r
81 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
\r
/* If any of the following are set then task stacks are filled with a known
value so the high water mark can be determined.  If none of the following are
set then don't fill the stack so there is no unnecessary dependency on memset. */
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	1
#else
	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	0
#endif
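
/* Example (illustrative only, not part of the kernel): when stacks are filled
with tskSTACK_FILL_BYTE, an application built with
INCLUDE_uxTaskGetStackHighWaterMark set to 1 can query how close a task has
come to overflowing its stack.  The task function below, its name and its
timing are arbitrary.

	void vCheckTask( void *pvParameters )
	{
	UBaseType_t uxHighWaterMark;

		( void ) pvParameters;

		for( ;; )
		{
			// Smallest amount of stack, in words, that has ever been free.
			uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
			vTaskDelay( pdMS_TO_TICKS( 1000 ) );
		}
	}
*/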
\r
/*
 * Macros used by vTaskList() to indicate which state a task is in.
 */
#define tskRUNNING_CHAR		( 'X' )
#define tskBLOCKED_CHAR		( 'B' )
#define tskREADY_CHAR		( 'R' )
#define tskDELETED_CHAR		( 'D' )
#define tskSUSPENDED_CHAR	( 'S' )
\r
/*
 * Some kernel aware debuggers require the data the debugger needs access to be
 * global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
	#define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
	#define configIDLE_TASK_NAME "IDLE"
#endif
\r
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
	performed in a generic way that is not optimised to any particular
	microcontroller architecture. */

	/* uxTopReadyPriority holds the priority of the highest priority ready
	state task. */
	#define taskRECORD_READY_PRIORITY( uxPriority )	\
	{	\
		if( ( uxPriority ) > uxTopReadyPriority )	\
		{	\
			uxTopReadyPriority = ( uxPriority );	\
		}	\
	} /* taskRECORD_READY_PRIORITY */

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()	\
	{	\
	UBaseType_t uxTopPriority = uxTopReadyPriority;	\
		\
		/* Find the highest priority queue that contains ready tasks. */	\
		while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) )	\
		{	\
			configASSERT( uxTopPriority );	\
			--uxTopPriority;	\
		}	\
		\
		/* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of	\
		the same priority get an equal share of the processor time. */	\
		listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );	\
		uxTopReadyPriority = uxTopPriority;	\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK */

	/*-----------------------------------------------------------*/

	/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
	they are only required when a port optimised method of task selection is
	being used. */
	#define taskRESET_READY_PRIORITY( uxPriority )
	#define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
\r
#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

	/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
	performed in a way that is tailored to the particular microcontroller
	architecture being used. */

	/* A port optimised version is provided.  Call the port defined macros. */
	#define taskRECORD_READY_PRIORITY( uxPriority )	portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )

	/*-----------------------------------------------------------*/

	#define taskSELECT_HIGHEST_PRIORITY_TASK()	\
	{	\
	UBaseType_t uxTopPriority;	\
		\
		/* Find the highest priority list that contains ready tasks. */	\
		portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );	\
		configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );	\
		listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );	\
	} /* taskSELECT_HIGHEST_PRIORITY_TASK() */

	/*-----------------------------------------------------------*/

	/* A port optimised version is provided, call it only if the TCB being reset
	is being referenced from a ready list.  If it is referenced from a delayed
	or suspended list then it won't be in a ready list. */
	#define taskRESET_READY_PRIORITY( uxPriority )	\
	{	\
		if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 )	\
		{	\
			portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );	\
		}	\
	}

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
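
/* Illustrative note (not part of the kernel): a port optimised implementation
typically keeps uxTopReadyPriority as a bit map with one bit per priority and
uses a count leading zeros instruction to find the highest set bit.  For
example, the ARM Cortex-M GCC ports define the macros along these lines:

	#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
	#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
	#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( ( uxReadyPriorities ) ) )

This is why such ports require configMAX_PRIORITIES to be 32 or less. */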
\r
194 /*-----------------------------------------------------------*/
\r
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
count overflows. */
#define taskSWITCH_DELAYED_LISTS()	\
{	\
List_t *pxTemp;	\
	\
	/* The delayed tasks list should be empty when the lists are switched. */	\
	configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );	\
	\
	pxTemp = pxDelayedTaskList;	\
	pxDelayedTaskList = pxOverflowDelayedTaskList;	\
	pxOverflowDelayedTaskList = pxTemp;	\
	xNumOfOverflows++;	\
	prvResetNextTaskUnblockTime();	\
}
\r
212 /*-----------------------------------------------------------*/
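/* Illustrative note (not part of the kernel): two delayed lists are needed
because wake times are stored as absolute tick values that can wrap.  For
example, with a 16 bit tick type and xTickCount equal to 0xFFF0, a task that
delays for 0x20 ticks must wake at tick 0x0010 - a value that is numerically
lower than the current tick count.  Such a task is placed on the overflow
list, which becomes the current delayed list when the tick count itself wraps
and taskSWITCH_DELAYED_LISTS() is invoked. */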
\r
/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )	\
	traceMOVED_TASK_TO_READY_STATE( pxTCB );	\
	taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );	\
	vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) );	\
	tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
/*-----------------------------------------------------------*/
\r
/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
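
/* Example (illustrative only, not part of the kernel): the NULL-means-self
convention implemented by prvGetTCBFromHandle() is what allows application
code such as the following, where the calling task operates on itself:

	// Query the priority of the calling task.
	UBaseType_t uxMyPriority = uxTaskPriorityGet( NULL );

	// Delete the calling task.
	vTaskDelete( NULL );
*/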
\r
233 /* The item value of the event list item is normally used to hold the priority
\r
234 of the task to which it belongs (coded to allow it to be held in reverse
\r
235 priority order). However, it is occasionally borrowed for other purposes. It
\r
236 is important its value is not updated due to a task priority change while it is
\r
237 being used for another purpose. The following bit definition is used to inform
\r
238 the scheduler that the value should not be changed - in which case it is the
\r
239 responsibility of whichever module is using the value to ensure it gets set back
\r
240 to its original value when it is released. */
\r
#if( configUSE_16_BIT_TICKS == 1 )
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
#else
	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
#endif
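
/* Illustrative note (not part of the kernel): event lists are kept in priority
order by storing ( configMAX_PRIORITIES - priority ) in the event list item, so
a higher priority task has a numerically smaller item value and sits nearer the
front of the list.  For example, with configMAX_PRIORITIES set to 5, a priority
4 task stores 1 and a priority 0 task stores 5.  The
taskEVENT_LIST_ITEM_VALUE_IN_USE bit (the top bit of the tick type) marks the
occasions when the item value has been temporarily borrowed for another
purpose, for example by the event groups implementation, and must not be
overwritten by a priority change. */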
\r
/*
 * Task control block.  A task control block (TCB) is allocated for each task,
 * and stores task state information, including a pointer to the task's context
 * (the task's run time environment, including register values)
 */
typedef struct tskTaskControlBlock 			/* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
	volatile StackType_t	*pxTopOfStack;	/*< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

	#if ( portUSING_MPU_WRAPPERS == 1 )
		xMPU_SETTINGS	xMPUSettings;		/*< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
	#endif

	ListItem_t			xStateListItem;		/*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended ). */
	ListItem_t			xEventListItem;		/*< Used to reference a task from an event list. */
	UBaseType_t			uxPriority;			/*< The priority of the task.  0 is the lowest priority. */
	StackType_t			*pxStack;			/*< Points to the start of the stack. */
	char				pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created.  Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

	#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
		StackType_t		*pxEndOfStack;		/*< Points to the highest valid address for the stack. */
	#endif

	#if ( portCRITICAL_NESTING_IN_TCB == 1 )
		UBaseType_t		uxCriticalNesting;	/*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
	#endif

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t		uxTCBNumber;		/*< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
		UBaseType_t		uxTaskNumber;		/*< Stores a number specifically for use by third party trace code. */
	#endif

	#if ( configUSE_MUTEXES == 1 )
		UBaseType_t		uxBasePriority;		/*< The priority last assigned to the task - used by the priority inheritance mechanism. */
		UBaseType_t		uxMutexesHeld;
	#endif

	#if ( configUSE_APPLICATION_TASK_TAG == 1 )
		TaskHookFunction_t pxTaskTag;
	#endif

	#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
		void			*pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
	#endif

	#if( configGENERATE_RUN_TIME_STATS == 1 )
		uint32_t		ulRunTimeCounter;	/*< Stores the amount of time the task has spent in the Running state. */
	#endif

	#if ( configUSE_NEWLIB_REENTRANT == 1 )
		/* Allocate a Newlib reent structure that is specific to this task.
		Note Newlib support has been included by popular demand, but is not
		used by the FreeRTOS maintainers themselves.  FreeRTOS is not
		responsible for resulting newlib operation.  User must be familiar with
		newlib and must provide system-wide implementations of the necessary
		stubs.  Be warned that (at the time of writing) the current newlib design
		implements a system-wide malloc() that must be provided with locks.

		See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
		for additional information. */
		struct	_reent xNewLib_reent;
	#endif

	#if( configUSE_TASK_NOTIFICATIONS == 1 )
		volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
		volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];
	#endif

	/* See the comments in FreeRTOS.h with the definition of
	tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
	#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
		uint8_t	ucStaticallyAllocated; 		/*< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */
	#endif

	#if( INCLUDE_xTaskAbortDelay == 1 )
		uint8_t ucDelayAborted;
	#endif

	#if( configUSE_POSIX_ERRNO == 1 )
		int iTaskErrno;
	#endif

} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;
\r
335 /*lint -save -e956 A manual analysis and inspection has been used to determine
\r
336 which static variables must be declared volatile. */
\r
337 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
\r
/* Lists for ready and blocked tasks. --------------------
xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
doing so breaks some kernel aware debuggers and debuggers that rely on removing
the static qualifier. */
\r
343 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
\r
344 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
\r
345 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
\r
346 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
\r
347 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
\r
348 PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
\r
#if( INCLUDE_vTaskDelete == 1 )

	PRIVILEGED_DATA static List_t xTasksWaitingTermination;				/*< Tasks that have been deleted - but their memory not yet freed. */
	PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;

#endif

#if ( INCLUDE_vTaskSuspend == 1 )

	PRIVILEGED_DATA static List_t xSuspendedTaskList;					/*< Tasks that are currently suspended. */

#endif
\r
/* Global POSIX errno. Its value is changed upon context switching to match
the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
	int FreeRTOS_errno = 0;
#endif
\r
369 /* Other file private variables. --------------------------------*/
\r
370 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
\r
371 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
\r
372 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
\r
373 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
\r
374 PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
\r
375 PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
\r
376 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
\r
377 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
\r
378 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
\r
379 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
\r
381 /* Context switches are held pending while the scheduler is suspended. Also,
\r
382 interrupts must not manipulate the xStateListItem of a TCB, or any of the
\r
383 lists the xStateListItem can be referenced from, if the scheduler is suspended.
\r
384 If an interrupt needs to unblock a task while the scheduler is suspended then it
\r
385 moves the task's event list item into the xPendingReadyList, ready for the
\r
386 kernel to move the task from the pending ready list into the real ready list
\r
387 when the scheduler is unsuspended. The pending ready list itself can only be
\r
388 accessed from a critical section. */
\r
389 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
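
/* Example (illustrative only, not part of the kernel): the rules above are
what make the following application level pattern safe - the scheduler is
suspended so no other task can run while a long operation is performed, yet
interrupts remain enabled and any task they unblock is simply held on
xPendingReadyList until xTaskResumeAll() is called.  The function called in
the example body is hypothetical.

	void vProcessSharedData( void )
	{
		vTaskSuspendAll();
		{
			// No task switch will occur here, although interrupts still run.
			prvWalkApplicationList();
		}
		( void ) xTaskResumeAll();
	}
*/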
\r
#if ( configGENERATE_RUN_TIME_STATS == 1 )

	/* Do not move these variables to function scope as doing so prevents the
	code working with debuggers that need to remove the static qualifier. */
	PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL;	/*< Holds the value of a timer/counter the last time a task was switched in. */
	PRIVILEGED_DATA static volatile uint32_t ulTotalRunTime = 0UL;		/*< Holds the total amount of execution time as defined by the run time counter clock. */

#endif
\r
402 /*-----------------------------------------------------------*/
\r
/* Callback function prototypes. --------------------------*/
#if( configCHECK_FOR_STACK_OVERFLOW > 0 )

	extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );

#endif

#if( configUSE_TICK_HOOK > 0 )

	extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */

#endif

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */

#endif
\r
423 /* File private functions. --------------------------------*/
\r
426 * Utility task that simply returns pdTRUE if the task referenced by xTask is
\r
427 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
\r
428 * is in any other state.
\r
430 #if ( INCLUDE_vTaskSuspend == 1 )
\r
432 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
\r
434 #endif /* INCLUDE_vTaskSuspend */
\r
437 * Utility to ready all the lists used by the scheduler. This is called
\r
438 * automatically upon the creation of the first task.
\r
440 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
\r
443 * The idle task, which as all tasks is implemented as a never ending loop.
\r
444 * The idle task is automatically created and added to the ready lists upon
\r
445 * creation of the first user task.
\r
447 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
\r
448 * language extensions. The equivalent prototype for this function is:
\r
450 * void prvIdleTask( void *pvParameters );
\r
453 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
\r
456 * Utility to free all memory allocated by the scheduler to hold a TCB,
\r
457 * including the stack pointed to by the TCB.
\r
459 * This does not free memory allocated by the task itself (i.e. memory
\r
460 * allocated by calls to pvPortMalloc from within the tasks application code).
\r
462 #if ( INCLUDE_vTaskDelete == 1 )
\r
464 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
\r
469 * Used only by the idle task. This checks to see if anything has been placed
\r
470 * in the list of tasks waiting to be deleted. If so the task is cleaned up
\r
471 * and its TCB deleted.
\r
473 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
\r
476 * The currently executing task is entering the Blocked state. Add the task to
\r
477 * either the current or the overflow delayed task list.
\r
479 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
\r
* Fills a TaskStatus_t structure with information on each task that is
\r
483 * referenced from the pxList list (which may be a ready list, a delayed list,
\r
484 * a suspended list, etc.).
\r
486 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
\r
487 * NORMAL APPLICATION CODE.
\r
489 #if ( configUSE_TRACE_FACILITY == 1 )
\r
491 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
\r
496 * Searches pxList for a task with name pcNameToQuery - returning a handle to
\r
497 * the task if it is found, or NULL if the task is not found.
\r
499 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
501 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
\r
506 * When a task is created, the stack of the task is filled with a known value.
\r
507 * This function determines the 'high water mark' of the task stack by
\r
508 * determining how much of the stack remains at the original preset value.
\r
510 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
\r
512 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
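
/* Illustrative sketch (not the kernel's implementation): the high water mark
is derived by counting how many bytes at the unused end of the stack still
hold tskSTACK_FILL_BYTE, then converting the byte count to words.  Roughly:

	static configSTACK_DEPTH_TYPE prvExampleFreeStackSpace( const uint8_t *pucStackByte )
	{
	uint32_t ulCount = 0U;

		while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
		{
			pucStackByte -= portSTACK_GROWTH;
			ulCount++;
		}

		ulCount /= ( uint32_t ) sizeof( StackType_t );

		return ( configSTACK_DEPTH_TYPE ) ulCount;
	}
*/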
\r
517 * Return the amount of time, in ticks, that will pass before the kernel will
\r
518 * next move a task from the Blocked state to the Running state.
\r
520 * This conditional compilation should use inequality to 0, not equality to 1.
\r
521 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
\r
522 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
\r
523 * set to a value other than 1.
\r
525 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
527 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
\r
532 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
\r
533 * will exit the Blocked state.
\r
535 static void prvResetNextTaskUnblockTime( void );
\r
537 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
\r
540 * Helper function used to pad task names with spaces when printing out
\r
541 * human readable tables of task information.
\r
543 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
\r
548 * Called after a Task_t structure has been allocated either statically or
\r
549 * dynamically to fill in the structure's members.
\r
551 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
552 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
553 const uint32_t ulStackDepth,
\r
554 void * const pvParameters,
\r
555 UBaseType_t uxPriority,
\r
556 TaskHandle_t * const pxCreatedTask,
\r
TCB_t *pxNewTCB,
const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
\r
561 * Called after a new task has been created and initialised to place the task
\r
562 * under the control of the scheduler.
\r
564 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
\r
567 * freertos_tasks_c_additions_init() should only be called if the user definable
\r
568 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
\r
569 * called by the function.
\r
571 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
573 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
\r
577 /*-----------------------------------------------------------*/
\r
579 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
\r
581 TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
\r
582 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
583 const uint32_t ulStackDepth,
\r
584 void * const pvParameters,
\r
585 UBaseType_t uxPriority,
\r
586 StackType_t * const puxStackBuffer,
\r
587 StaticTask_t * const pxTaskBuffer )
\r
590 TaskHandle_t xReturn;
\r
592 configASSERT( puxStackBuffer != NULL );
\r
593 configASSERT( pxTaskBuffer != NULL );
\r
595 #if( configASSERT_DEFINED == 1 )
\r
597 /* Sanity check that the size of the structure used to declare a
\r
598 variable of type StaticTask_t equals the size of the real task
\r
600 volatile size_t xSize = sizeof( StaticTask_t );
\r
601 configASSERT( xSize == sizeof( TCB_t ) );
\r
602 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
\r
604 #endif /* configASSERT_DEFINED */
\r
607 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
\r
609 /* The memory used for the task's TCB and stack are passed into this
\r
610 function - use them. */
\r
611 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
\r
612 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
\r
614 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
\r
616 /* Tasks can be created statically or dynamically, so note this
\r
617 task was created statically in case the task is later deleted. */
\r
618 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
\r
620 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
\r
622 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
\r
623 prvAddNewTaskToReadyList( pxNewTCB );
\r
633 #endif /* SUPPORT_STATIC_ALLOCATION */
\r
634 /*-----------------------------------------------------------*/
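
/* Example (illustrative only, not part of the kernel): creating a task with
application supplied memory when configSUPPORT_STATIC_ALLOCATION is 1.  The
task function, stack size and priority used here are arbitrary.

	#define STACK_SIZE	200

	static StaticTask_t xTaskBuffer;
	static StackType_t xStack[ STACK_SIZE ];

	void vTaskCode( void *pvParameters )
	{
		( void ) pvParameters;

		for( ;; )
		{
			vTaskDelay( pdMS_TO_TICKS( 100 ) );
		}
	}

	void vCreateTheTask( void )
	{
	TaskHandle_t xHandle;

		xHandle = xTaskCreateStatic( vTaskCode, "NAME", STACK_SIZE, NULL,
									 tskIDLE_PRIORITY + 1, xStack, &xTaskBuffer );
		( void ) xHandle;
	}
*/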
\r
636 #if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
\r
638 BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
\r
641 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
\r
643 configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
\r
644 configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
\r
646 if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
\r
648 /* Allocate space for the TCB. Where the memory comes from depends
\r
649 on the implementation of the port malloc function and whether or
\r
650 not static allocation is being used. */
\r
651 pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
\r
653 /* Store the stack location in the TCB. */
\r
654 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
\r
656 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
\r
658 /* Tasks can be created statically or dynamically, so note this
\r
659 task was created statically in case the task is later deleted. */
\r
660 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
\r
662 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
\r
664 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
\r
665 pxTaskDefinition->pcName,
\r
666 ( uint32_t ) pxTaskDefinition->usStackDepth,
\r
667 pxTaskDefinition->pvParameters,
\r
668 pxTaskDefinition->uxPriority,
\r
669 pxCreatedTask, pxNewTCB,
\r
670 pxTaskDefinition->xRegions );
\r
672 prvAddNewTaskToReadyList( pxNewTCB );
\r
679 #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
\r
680 /*-----------------------------------------------------------*/
\r
682 #if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
684 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
\r
687 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
\r
689 configASSERT( pxTaskDefinition->puxStackBuffer );
\r
691 if( pxTaskDefinition->puxStackBuffer != NULL )
\r
693 /* Allocate space for the TCB. Where the memory comes from depends
\r
694 on the implementation of the port malloc function and whether or
\r
695 not static allocation is being used. */
\r
696 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
\r
698 if( pxNewTCB != NULL )
\r
700 /* Store the stack location in the TCB. */
\r
701 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
\r
703 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
\r
705 /* Tasks can be created statically or dynamically, so note
\r
706 this task had a statically allocated stack in case it is
\r
707 later deleted. The TCB was allocated dynamically. */
\r
708 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
\r
710 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
\r
712 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
\r
713 pxTaskDefinition->pcName,
\r
714 ( uint32_t ) pxTaskDefinition->usStackDepth,
\r
715 pxTaskDefinition->pvParameters,
\r
716 pxTaskDefinition->uxPriority,
\r
717 pxCreatedTask, pxNewTCB,
\r
718 pxTaskDefinition->xRegions );
\r
720 prvAddNewTaskToReadyList( pxNewTCB );
\r
728 #endif /* portUSING_MPU_WRAPPERS */
\r
729 /*-----------------------------------------------------------*/
\r
731 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
\r
733 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
\r
734 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
735 const configSTACK_DEPTH_TYPE usStackDepth,
\r
736 void * const pvParameters,
\r
737 UBaseType_t uxPriority,
\r
738 TaskHandle_t * const pxCreatedTask )
\r
741 BaseType_t xReturn;
\r
743 /* If the stack grows down then allocate the stack then the TCB so the stack
\r
744 does not grow into the TCB. Likewise if the stack grows up then allocate
\r
745 the TCB then the stack. */
\r
746 #if( portSTACK_GROWTH > 0 )
\r
748 /* Allocate space for the TCB. Where the memory comes from depends on
\r
749 the implementation of the port malloc function and whether or not static
\r
750 allocation is being used. */
\r
751 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
\r
753 if( pxNewTCB != NULL )
\r
755 /* Allocate space for the stack used by the task being created.
\r
756 The base of the stack memory stored in the TCB so the task can
\r
757 be deleted later if required. */
\r
758 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
760 if( pxNewTCB->pxStack == NULL )
\r
762 /* Could not allocate the stack. Delete the allocated TCB. */
\r
763 vPortFree( pxNewTCB );
\r
768 #else /* portSTACK_GROWTH */
\r
770 StackType_t *pxStack;
\r
772 /* Allocate space for the stack used by the task being created. */
\r
773 pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
\r
775 if( pxStack != NULL )
\r
777 /* Allocate space for the TCB. */
\r
778 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
\r
780 if( pxNewTCB != NULL )
\r
782 /* Store the stack location in the TCB. */
\r
783 pxNewTCB->pxStack = pxStack;
\r
787 /* The stack cannot be used as the TCB was not created. Free
\r
789 vPortFree( pxStack );
\r
797 #endif /* portSTACK_GROWTH */
\r
799 if( pxNewTCB != NULL )
\r
801 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
\r
803 /* Tasks can be created statically or dynamically, so note this
\r
804 task was created dynamically in case it is later deleted. */
\r
805 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
\r
807 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
\r
809 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
\r
810 prvAddNewTaskToReadyList( pxNewTCB );
\r
815 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
\r
821 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
822 /*-----------------------------------------------------------*/
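
/* Example (illustrative only, not part of the kernel): dynamic task creation
when configSUPPORT_DYNAMIC_ALLOCATION is 1.  The stack depth of 200 words and
the priority used here are arbitrary.

	void vBlinkTask( void *pvParameters )
	{
		( void ) pvParameters;

		for( ;; )
		{
			vTaskDelay( pdMS_TO_TICKS( 500 ) );
		}
	}

	void vCreateBlinkTask( void )
	{
	BaseType_t xReturned;
	TaskHandle_t xHandle = NULL;

		xReturned = xTaskCreate( vBlinkTask, "Blink", 200, NULL,
								 tskIDLE_PRIORITY + 1, &xHandle );

		if( xReturned != pdPASS )
		{
			// errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY was returned - handle it.
		}
	}
*/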
\r
824 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
\r
825 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
826 const uint32_t ulStackDepth,
\r
827 void * const pvParameters,
\r
828 UBaseType_t uxPriority,
\r
829 TaskHandle_t * const pxCreatedTask,
\r
TCB_t *pxNewTCB,
const MemoryRegion_t * const xRegions )
\r
833 StackType_t *pxTopOfStack;
\r
836 #if( portUSING_MPU_WRAPPERS == 1 )
\r
837 /* Should the task be created in privileged mode? */
\r
838 BaseType_t xRunPrivileged;
\r
839 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
\r
841 xRunPrivileged = pdTRUE;
\r
845 xRunPrivileged = pdFALSE;
\r
847 uxPriority &= ~portPRIVILEGE_BIT;
\r
848 #endif /* portUSING_MPU_WRAPPERS == 1 */
\r
850 /* Avoid dependency on memset() if it is not required. */
\r
851 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
\r
853 /* Fill the stack with a known value to assist debugging. */
\r
854 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
\r
856 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
\r
/* Calculate the top of stack address.  This depends on whether the stack
grows from high memory to low (as per the 80x86) or vice versa.
portSTACK_GROWTH is used to make the result positive or negative as required
by the port. */
\r
862 #if( portSTACK_GROWTH < 0 )
\r
864 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
\r
865 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
\r
867 /* Check the alignment of the calculated top of stack is correct. */
\r
868 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
870 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
\r
/* Also record the stack's high address, which may assist
debugging. */
\r
874 pxNewTCB->pxEndOfStack = pxTopOfStack;
\r
876 #endif /* configRECORD_STACK_HIGH_ADDRESS */
\r
878 #else /* portSTACK_GROWTH */
\r
880 pxTopOfStack = pxNewTCB->pxStack;
\r
882 /* Check the alignment of the stack buffer is correct. */
\r
883 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
\r
/* The other extreme of the stack space is required if stack checking is
performed. */
\r
887 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
\r
889 #endif /* portSTACK_GROWTH */
\r
891 /* Store the task name in the TCB. */
\r
892 if( pcName != NULL )
\r
894 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
\r
896 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
\r
898 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
\r
899 configMAX_TASK_NAME_LEN characters just in case the memory after the
\r
900 string is not accessible (extremely unlikely). */
\r
901 if( pcName[ x ] == ( char ) 0x00 )
\r
907 mtCOVERAGE_TEST_MARKER();
\r
911 /* Ensure the name string is terminated in the case that the string length
\r
912 was greater or equal to configMAX_TASK_NAME_LEN. */
\r
913 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
\r
917 /* The task has not been given a name, so just ensure there is a NULL
\r
918 terminator when it is read out. */
\r
919 pxNewTCB->pcTaskName[ 0 ] = 0x00;
\r
922 /* This is used as an array index so must ensure it's not too large. First
\r
923 remove the privilege bit if one is present. */
\r
924 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
926 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
930 mtCOVERAGE_TEST_MARKER();
\r
933 pxNewTCB->uxPriority = uxPriority;
\r
934 #if ( configUSE_MUTEXES == 1 )
\r
936 pxNewTCB->uxBasePriority = uxPriority;
\r
937 pxNewTCB->uxMutexesHeld = 0;
\r
939 #endif /* configUSE_MUTEXES */
\r
941 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
\r
942 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
\r
944 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
\r
945 back to the containing TCB from a generic item in a list. */
\r
946 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
\r
948 /* Event lists are always in priority order. */
\r
949 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
950 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
\r
952 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
954 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
\r
956 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
958 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
960 pxNewTCB->pxTaskTag = NULL;
\r
962 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
964 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
966 pxNewTCB->ulRunTimeCounter = 0UL;
\r
968 #endif /* configGENERATE_RUN_TIME_STATS */
\r
970 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
972 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
\r
976 /* Avoid compiler warning about unreferenced parameter. */
\r
981 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
\r
983 memset( ( void * ) &( pxNewTCB->pvThreadLocalStoragePointers[ 0 ] ), 0x00, sizeof( pxNewTCB->pvThreadLocalStoragePointers ) );
\r
987 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
\r
989 memset( ( void * ) &( pxNewTCB->ulNotifiedValue[ 0 ] ), 0x00, sizeof( pxNewTCB->ulNotifiedValue ) );
\r
990 memset( ( void * ) &( pxNewTCB->ucNotifyState[ 0 ] ), 0x00, sizeof( pxNewTCB->ucNotifyState ) );
\r
994 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
996 /* Initialise this task's Newlib reent structure.
\r
997 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
998 for additional information. */
\r
999 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
\r
1003 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
1005 pxNewTCB->ucDelayAborted = pdFALSE;
\r
1009 /* Initialize the TCB stack to look as if the task was already running,
\r
1010 but had been interrupted by the scheduler. The return address is set
\r
1011 to the start of the task function. Once the stack has been initialised
\r
1012 the top of stack variable is updated. */
\r
1013 #if( portUSING_MPU_WRAPPERS == 1 )
\r
1015 /* If the port has capability to detect stack overflow,
\r
1016 pass the stack end address to the stack initialization
\r
1017 function as well. */
\r
1018 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
\r
1020 #if( portSTACK_GROWTH < 0 )
\r
1022 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1024 #else /* portSTACK_GROWTH */
\r
1026 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1028 #endif /* portSTACK_GROWTH */
\r
1030 #else /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1032 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
\r
1034 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1036 #else /* portUSING_MPU_WRAPPERS */
\r
1038 /* If the port has capability to detect stack overflow,
\r
1039 pass the stack end address to the stack initialization
\r
1040 function as well. */
\r
1041 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
\r
1043 #if( portSTACK_GROWTH < 0 )
\r
1045 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
\r
1047 #else /* portSTACK_GROWTH */
\r
1049 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
\r
1051 #endif /* portSTACK_GROWTH */
\r
1053 #else /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1055 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
\r
1057 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
\r
1059 #endif /* portUSING_MPU_WRAPPERS */
\r
1061 if( pxCreatedTask != NULL )
\r
1063 /* Pass the handle out in an anonymous way. The handle can be used to
\r
1064 change the created task's priority, delete the created task, etc.*/
\r
1065 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
\r
1069 mtCOVERAGE_TEST_MARKER();
\r
1072 /*-----------------------------------------------------------*/
\r
1074 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
\r
/* Ensure interrupts don't access the task lists while the lists are being
updated. */
\r
1078 taskENTER_CRITICAL();
\r
1080 uxCurrentNumberOfTasks++;
\r
1081 if( pxCurrentTCB == NULL )
\r
1083 /* There are no other tasks, or all the other tasks are in
\r
1084 the suspended state - make this the current task. */
\r
1085 pxCurrentTCB = pxNewTCB;
\r
1087 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
\r
1089 /* This is the first task to be created so do the preliminary
\r
1090 initialisation required. We will not recover if this call
\r
1091 fails, but we will report the failure. */
\r
1092 prvInitialiseTaskLists();
\r
1096 mtCOVERAGE_TEST_MARKER();
\r
/* If the scheduler is not already running, make this task the
current task if it is the highest priority task to be created
so far. */
\r
1104 if( xSchedulerRunning == pdFALSE )
\r
1106 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
\r
1108 pxCurrentTCB = pxNewTCB;
\r
1112 mtCOVERAGE_TEST_MARKER();
\r
1117 mtCOVERAGE_TEST_MARKER();
\r
1123 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1125 /* Add a counter into the TCB for tracing only. */
\r
1126 pxNewTCB->uxTCBNumber = uxTaskNumber;
\r
1128 #endif /* configUSE_TRACE_FACILITY */
\r
1129 traceTASK_CREATE( pxNewTCB );
\r
1131 prvAddTaskToReadyList( pxNewTCB );
\r
1133 portSETUP_TCB( pxNewTCB );
\r
1135 taskEXIT_CRITICAL();
\r
1137 if( xSchedulerRunning != pdFALSE )
\r
1139 /* If the created task is of a higher priority than the current task
\r
1140 then it should run now. */
\r
1141 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
\r
1143 taskYIELD_IF_USING_PREEMPTION();
\r
1147 mtCOVERAGE_TEST_MARKER();
\r
1152 mtCOVERAGE_TEST_MARKER();
\r
1155 /*-----------------------------------------------------------*/
\r
1157 #if ( INCLUDE_vTaskDelete == 1 )
\r
1159 void vTaskDelete( TaskHandle_t xTaskToDelete )
\r
1163 taskENTER_CRITICAL();
\r
/* If null is passed in here then it is the calling task that is
being deleted. */
\r
1167 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
\r
1169 /* Remove task from the ready/delayed list. */
\r
1170 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1172 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1176 mtCOVERAGE_TEST_MARKER();
\r
1179 /* Is the task waiting on an event also? */
\r
1180 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1182 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1186 mtCOVERAGE_TEST_MARKER();
\r
/* Increment the uxTaskNumber also so kernel aware debuggers can
detect that the task lists need re-generating.  This is done before
portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
not return. */
uxTaskNumber++;
\r
1195 if( pxTCB == pxCurrentTCB )
\r
1197 /* A task is deleting itself. This cannot complete within the
\r
1198 task itself, as a context switch to another task is required.
\r
1199 Place the task in the termination list. The idle task will
\r
1200 check the termination list and free up any memory allocated by
\r
1201 the scheduler for the TCB and stack of the deleted task. */
\r
1202 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
\r
1204 /* Increment the ucTasksDeleted variable so the idle task knows
\r
1205 there is a task that has been deleted and that it should therefore
\r
1206 check the xTasksWaitingTermination list. */
\r
1207 ++uxDeletedTasksWaitingCleanUp;
\r
1209 /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
\r
1210 portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
\r
1211 traceTASK_DELETE( pxTCB );
\r
/* The pre-delete hook is primarily for the Windows simulator,
in which Windows specific clean up operations are performed,
after which it is not possible to yield away from this task -
hence xYieldPending is used to latch that a context switch is
required. */
portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
\r
1222 --uxCurrentNumberOfTasks;
\r
1223 traceTASK_DELETE( pxTCB );
\r
1224 prvDeleteTCB( pxTCB );
\r
1226 /* Reset the next expected unblock time in case it referred to
\r
1227 the task that has just been deleted. */
\r
1228 prvResetNextTaskUnblockTime();
\r
1231 taskEXIT_CRITICAL();
\r
/* Force a reschedule if it is the currently running task that has just
been deleted. */
\r
1235 if( xSchedulerRunning != pdFALSE )
\r
1237 if( pxTCB == pxCurrentTCB )
\r
1239 configASSERT( uxSchedulerSuspended == 0 );
\r
1240 portYIELD_WITHIN_API();
\r
1244 mtCOVERAGE_TEST_MARKER();
\r
1249 #endif /* INCLUDE_vTaskDelete */
\r
1250 /*-----------------------------------------------------------*/
\r
1252 #if ( INCLUDE_vTaskDelayUntil == 1 )
\r
1254 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
\r
1256 TickType_t xTimeToWake;
\r
1257 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
\r
1259 configASSERT( pxPreviousWakeTime );
\r
1260 configASSERT( ( xTimeIncrement > 0U ) );
\r
1261 configASSERT( uxSchedulerSuspended == 0 );
\r
1263 vTaskSuspendAll();
\r
/* Minor optimisation.  The tick count cannot change in this
block. */
\r
1267 const TickType_t xConstTickCount = xTickCount;
\r
1269 /* Generate the tick time at which the task wants to wake. */
\r
1270 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
\r
1272 if( xConstTickCount < *pxPreviousWakeTime )
\r
1274 /* The tick count has overflowed since this function was
\r
last called.  In this case the only time we should ever
\r
1276 actually delay is if the wake time has also overflowed,
\r
1277 and the wake time is greater than the tick time. When this
\r
1278 is the case it is as if neither time had overflowed. */
\r
1279 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
\r
1281 xShouldDelay = pdTRUE;
\r
1285 mtCOVERAGE_TEST_MARKER();
\r
1290 /* The tick time has not overflowed. In this case we will
\r
1291 delay if either the wake time has overflowed, and/or the
\r
1292 tick time is less than the wake time. */
\r
1293 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
\r
1295 xShouldDelay = pdTRUE;
\r
1299 mtCOVERAGE_TEST_MARKER();
\r
1303 /* Update the wake time ready for the next call. */
\r
1304 *pxPreviousWakeTime = xTimeToWake;
\r
1306 if( xShouldDelay != pdFALSE )
\r
1308 traceTASK_DELAY_UNTIL( xTimeToWake );
\r
1310 /* prvAddCurrentTaskToDelayedList() needs the block time, not
\r
1311 the time to wake, so subtract the current tick count. */
\r
1312 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
\r
1316 mtCOVERAGE_TEST_MARKER();
\r
1319 xAlreadyYielded = xTaskResumeAll();
\r
1321 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1322 have put ourselves to sleep. */
\r
1323 if( xAlreadyYielded == pdFALSE )
\r
1325 portYIELD_WITHIN_API();
\r
1329 mtCOVERAGE_TEST_MARKER();
\r
1333 #endif /* INCLUDE_vTaskDelayUntil */
\r
1334 /*-----------------------------------------------------------*/
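/* Example (illustrative only, not part of the kernel): vTaskDelayUntil() is
normally used to implement a fixed frequency periodic task.  The 10ms period
used here is arbitrary.

	void vPeriodicTask( void *pvParameters )
	{
	TickType_t xLastWakeTime;
	const TickType_t xPeriod = pdMS_TO_TICKS( 10 );

		( void ) pvParameters;

		// Initialise xLastWakeTime with the current tick count.
		xLastWakeTime = xTaskGetTickCount();

		for( ;; )
		{
			// Wait for the next cycle, measured from the previous wake time
			// rather than from the time this call happens to be made.
			vTaskDelayUntil( &xLastWakeTime, xPeriod );

			// Perform the periodic processing here.
		}
	}
*/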
\r
1336 #if ( INCLUDE_vTaskDelay == 1 )
\r
1338 void vTaskDelay( const TickType_t xTicksToDelay )
\r
1340 BaseType_t xAlreadyYielded = pdFALSE;
\r
1342 /* A delay time of zero just forces a reschedule. */
\r
1343 if( xTicksToDelay > ( TickType_t ) 0U )
\r
1345 configASSERT( uxSchedulerSuspended == 0 );
\r
1346 vTaskSuspendAll();
\r
1348 traceTASK_DELAY();
\r
/* A task that is removed from the event list while the
scheduler is suspended will not get placed in the ready
list or removed from the blocked list until the scheduler
is resumed.

This task cannot be in an event list as it is the currently
executing task. */
\r
1357 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
\r
1359 xAlreadyYielded = xTaskResumeAll();
\r
1363 mtCOVERAGE_TEST_MARKER();
\r
1366 /* Force a reschedule if xTaskResumeAll has not already done so, we may
\r
1367 have put ourselves to sleep. */
\r
1368 if( xAlreadyYielded == pdFALSE )
\r
1370 portYIELD_WITHIN_API();
\r
1374 mtCOVERAGE_TEST_MARKER();
\r
1378 #endif /* INCLUDE_vTaskDelay */
\r
1379 /*-----------------------------------------------------------*/
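/* Example (illustrative only, not part of the kernel): vTaskDelay() specifies
a time relative to the point at which it is called, so it suits simple pacing
rather than fixed frequency execution (use vTaskDelayUntil() for the latter).

	// Block the calling task for approximately 250 milliseconds.
	vTaskDelay( pdMS_TO_TICKS( 250 ) );
*/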
\r
1381 #if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
\r
1383 eTaskState eTaskGetState( TaskHandle_t xTask )
\r
1385 eTaskState eReturn;
\r
1386 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
\r
1387 const TCB_t * const pxTCB = xTask;
\r
1389 configASSERT( pxTCB );
\r
1391 if( pxTCB == pxCurrentTCB )
\r
1393 /* The task calling this function is querying its own state. */
\r
1394 eReturn = eRunning;
\r
1398 taskENTER_CRITICAL();
\r
1400 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
\r
1401 pxDelayedList = pxDelayedTaskList;
\r
1402 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
\r
1404 taskEXIT_CRITICAL();
\r
1406 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
\r
/* The task being queried is referenced from one of the Blocked
lists. */
\r
1410 eReturn = eBlocked;
\r
1413 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1414 else if( pxStateList == &xSuspendedTaskList )
\r
/* The task being queried is referenced from the suspended
list.  Is it genuinely suspended or is it blocked
indefinitely? */
\r
1419 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
\r
1421 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
/* The task does not appear on the event list of
any of the RTOS objects, but could still be in the
Blocked state if it is waiting on its notification
rather than waiting on an object.  If not, it is
genuinely suspended. */
eReturn = eSuspended;
\r
1431 for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
\r
1433 if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
\r
1435 eReturn = eBlocked;
\r
1442 eReturn = eSuspended;
\r
1448 eReturn = eBlocked;
\r
1453 #if ( INCLUDE_vTaskDelete == 1 )
\r
1454 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
\r
/* The task being queried is referenced from the deleted
tasks list, or it is not referenced from any lists at
all. */
\r
1459 eReturn = eDeleted;
\r
1463 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
\r
/* If the task is not in any other state, it must be in the
Ready (including pending ready) state. */
eReturn = eReady;
\r
1472 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1474 #endif /* INCLUDE_eTaskGetState */
\r
1475 /*-----------------------------------------------------------*/
\r
1477 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1479 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
\r
1481 TCB_t const *pxTCB;
\r
1482 UBaseType_t uxReturn;
\r
1484 taskENTER_CRITICAL();
\r
1486 /* If null is passed in here then it is the priority of the task
\r
1487 that called uxTaskPriorityGet() that is being queried. */
\r
1488 pxTCB = prvGetTCBFromHandle( xTask );
\r
1489 uxReturn = pxTCB->uxPriority;
\r
1491 taskEXIT_CRITICAL();
\r
1496 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1497 /*-----------------------------------------------------------*/
\r
1499 #if ( INCLUDE_uxTaskPriorityGet == 1 )
\r
1501 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
\r
1503 TCB_t const *pxTCB;
\r
1504 UBaseType_t uxReturn, uxSavedInterruptState;
\r
1506 /* RTOS ports that support interrupt nesting have the concept of a
\r
1507 maximum system call (or maximum API call) interrupt priority.
\r
Interrupts that are above the maximum system call priority are kept
\r
1509 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1510 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1511 is defined in FreeRTOSConfig.h then
\r
1512 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1513 failure if a FreeRTOS API function is called from an interrupt that has
\r
1514 been assigned a priority above the configured maximum system call
\r
1515 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1516 from interrupts that have been assigned a priority at or (logically)
\r
1517 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1518 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1519 simple as possible. More information (albeit Cortex-M specific) is
\r
1520 provided on the following link:
\r
1521 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1522 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1524 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1526 /* If null is passed in here then it is the priority of the calling
\r
1527 task that is being queried. */
\r
1528 pxTCB = prvGetTCBFromHandle( xTask );
\r
1529 uxReturn = pxTCB->uxPriority;
\r
1531 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
\r
1536 #endif /* INCLUDE_uxTaskPriorityGet */
\r
1537 /*-----------------------------------------------------------*/
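/* Example (illustrative only, not part of the kernel): the FromISR version
can be used from an interrupt that has been assigned a priority at or below
configMAX_SYSCALL_INTERRUPT_PRIORITY.  The handler name and the task handle
variable below are hypothetical.

	void vExampleInterruptHandler( void )
	{
	UBaseType_t uxPriority;
	extern TaskHandle_t xWorkerTaskHandle;

		// Safe to call from an ISR; uxTaskPriorityGet() is not.
		uxPriority = uxTaskPriorityGetFromISR( xWorkerTaskHandle );
		( void ) uxPriority;
	}
*/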
\r
1539 #if ( INCLUDE_vTaskPrioritySet == 1 )
\r
1541 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
\r
1544 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
\r
1545 BaseType_t xYieldRequired = pdFALSE;
\r
1547 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
\r
1549 /* Ensure the new priority is valid. */
\r
1550 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
\r
1552 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
\r
1556 mtCOVERAGE_TEST_MARKER();
\r
1559 taskENTER_CRITICAL();
\r
1561 /* If null is passed in here then it is the priority of the calling
\r
1562 task that is being changed. */
\r
1563 pxTCB = prvGetTCBFromHandle( xTask );
\r
1565 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
\r
1567 #if ( configUSE_MUTEXES == 1 )
\r
1569 uxCurrentBasePriority = pxTCB->uxBasePriority;
\r
1573 uxCurrentBasePriority = pxTCB->uxPriority;
\r
1577 if( uxCurrentBasePriority != uxNewPriority )
\r
1579 /* The priority change may have readied a task of higher
\r
1580 priority than the calling task. */
\r
1581 if( uxNewPriority > uxCurrentBasePriority )
\r
1583 if( pxTCB != pxCurrentTCB )
\r
1585 /* The priority of a task other than the currently
\r
1586 running task is being raised. Is the priority being
\r
1587 raised above that of the running task? */
\r
1588 if( uxNewPriority >= pxCurrentTCB->uxPriority )
\r
1590 xYieldRequired = pdTRUE;
\r
1594 mtCOVERAGE_TEST_MARKER();
\r
1599 /* The priority of the running task is being raised,
\r
1600 but the running task must already be the highest
\r
1601 priority task able to run so no yield is required. */
\r
1604 else if( pxTCB == pxCurrentTCB )
\r
1606 /* Setting the priority of the running task down means
\r
1607 there may now be another task of higher priority that
\r
1608 is ready to execute. */
\r
1609 xYieldRequired = pdTRUE;
\r
1613 /* Setting the priority of any other task down does not
\r
1614 require a yield as the running task must be above the
\r
1615 new priority of the task being modified. */
\r
1618 /* Remember the ready list the task might be referenced from
\r
1619 before its uxPriority member is changed so the
\r
1620 taskRESET_READY_PRIORITY() macro can function correctly. */
\r
1621 uxPriorityUsedOnEntry = pxTCB->uxPriority;
\r
1623 #if ( configUSE_MUTEXES == 1 )
\r
1625 /* Only change the priority being used if the task is not
\r
1626 currently using an inherited priority. */
\r
1627 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
\r
1629 pxTCB->uxPriority = uxNewPriority;
\r
1633 mtCOVERAGE_TEST_MARKER();
\r
1636 /* The base priority is updated whether or not an inherited priority is currently in use. */
\r
1637 pxTCB->uxBasePriority = uxNewPriority;
\r
1641 pxTCB->uxPriority = uxNewPriority;
\r
1645 /* Only reset the event list item value if the value is not
\r
1646 being used for anything else. */
\r
1647 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
1649 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1653 mtCOVERAGE_TEST_MARKER();
\r
1656 /* If the task is in the blocked or suspended list we need do
\r
1657 nothing more than change its priority variable. However, if
\r
1658 the task is in a ready list it needs to be removed and placed
\r
1659 in the list appropriate to its new priority. */
\r
1660 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1662 /* The task is currently in its ready list - remove before
\r
1663 adding it to its new ready list. As we are in a critical
\r
1664 section we can do this even if the scheduler is suspended. */
\r
1665 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1667 /* It is known that the task is in its ready list so
\r
1668 there is no need to check again and the port level
\r
1669 reset macro can be called directly. */
\r
1670 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
\r
1674 mtCOVERAGE_TEST_MARKER();
\r
1676 prvAddTaskToReadyList( pxTCB );
\r
1680 mtCOVERAGE_TEST_MARKER();
\r
1683 if( xYieldRequired != pdFALSE )
\r
1685 taskYIELD_IF_USING_PREEMPTION();
\r
1689 mtCOVERAGE_TEST_MARKER();
\r
1692 /* Remove compiler warning about unused variables when the port
\r
1693 optimised task selection is not being used. */
\r
1694 ( void ) uxPriorityUsedOnEntry;
\r
1697 taskEXIT_CRITICAL();
\r
1700 #endif /* INCLUDE_vTaskPrioritySet */
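/* Illustrative usage sketch (application code, not part of the kernel).
vBoostWorker() and its parameter are assumed names.  A task temporarily raises
another task's priority and then restores the value read beforehand with
uxTaskPriorityGet(); uxOriginal + 1 is assumed to still be below
configMAX_PRIORITIES (the function clips it otherwise):

    void vBoostWorker( TaskHandle_t xWorkerHandle )
    {
        UBaseType_t uxOriginal = uxTaskPriorityGet( xWorkerHandle );

        vTaskPrioritySet( xWorkerHandle, uxOriginal + 1 );
        // ... time critical work runs at the raised priority here ...
        vTaskPrioritySet( xWorkerHandle, uxOriginal );
    }
*/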
\r
1701 /*-----------------------------------------------------------*/
\r
1703 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1705 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
\r
1709 taskENTER_CRITICAL();
\r
1711 /* If null is passed in here then it is the running task that is
\r
1712 being suspended. */
\r
1713 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
\r
1715 traceTASK_SUSPEND( pxTCB );
\r
1717 /* Remove task from the ready/delayed list and place in the
\r
1718 suspended list. */
\r
1719 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
1721 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
\r
1725 mtCOVERAGE_TEST_MARKER();
\r
1728 /* Is the task waiting on an event also? */
\r
1729 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
1731 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
1735 mtCOVERAGE_TEST_MARKER();
\r
1738 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
\r
1740 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
1744 for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
\r
1746 if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
\r
1748 /* The task was blocked to wait for a notification, but is
\r
1749 now suspended, so no notification was received. */
\r
1750 pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
\r
1756 taskEXIT_CRITICAL();
\r
1758 if( xSchedulerRunning != pdFALSE )
\r
1760 /* Reset the next expected unblock time in case it referred to the
\r
1761 task that is now in the Suspended state. */
\r
1762 taskENTER_CRITICAL();
\r
1764 prvResetNextTaskUnblockTime();
\r
1766 taskEXIT_CRITICAL();
\r
1770 mtCOVERAGE_TEST_MARKER();
\r
1773 if( pxTCB == pxCurrentTCB )
\r
1775 if( xSchedulerRunning != pdFALSE )
\r
1777 /* The current task has just been suspended. */
\r
1778 configASSERT( uxSchedulerSuspended == 0 );
\r
1779 portYIELD_WITHIN_API();
\r
1783 /* The scheduler is not running, but the task that was pointed
\r
1784 to by pxCurrentTCB has just been suspended and pxCurrentTCB
\r
1785 must be adjusted to point to a different task. */
\r
1786 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
\r
1788 /* No other tasks are ready, so set pxCurrentTCB back to
\r
1789 NULL so when the next task is created pxCurrentTCB will
\r
1790 be set to point to it no matter what its relative priority is. */
\r
1792 pxCurrentTCB = NULL;
\r
1796 vTaskSwitchContext();
\r
1802 mtCOVERAGE_TEST_MARKER();
\r
1806 #endif /* INCLUDE_vTaskSuspend */
\r
1807 /*-----------------------------------------------------------*/
\r
1809 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1811 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
\r
1813 BaseType_t xReturn = pdFALSE;
\r
1814 const TCB_t * const pxTCB = xTask;
\r
1816 /* Accesses xPendingReadyList so must be called from a critical section. */
\r
1819 /* It does not make sense to check if the calling task is suspended. */
\r
1820 configASSERT( xTask );
\r
1822 /* Is the task being resumed actually in the suspended list? */
\r
1823 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
1825 /* Has the task already been resumed from within an ISR? */
\r
1826 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
\r
1828 /* Is it in the suspended list because it is in the Suspended
\r
1829 state, or because it is blocked with no timeout? */
\r
1830 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
\r
1836 mtCOVERAGE_TEST_MARKER();
\r
1841 mtCOVERAGE_TEST_MARKER();
\r
1846 mtCOVERAGE_TEST_MARKER();
\r
1850 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
\r
1852 #endif /* INCLUDE_vTaskSuspend */
\r
1853 /*-----------------------------------------------------------*/
\r
1855 #if ( INCLUDE_vTaskSuspend == 1 )
\r
1857 void vTaskResume( TaskHandle_t xTaskToResume )
\r
1859 TCB_t * const pxTCB = xTaskToResume;
\r
1861 /* It does not make sense to resume the calling task. */
\r
1862 configASSERT( xTaskToResume );
\r
1864 /* The parameter cannot be NULL as it is impossible to resume the
\r
1865 currently executing task. */
\r
1866 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
\r
1868 taskENTER_CRITICAL();
\r
1870 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1872 traceTASK_RESUME( pxTCB );
\r
1874 /* The ready list can be accessed even if the scheduler is
\r
1875 suspended because this is inside a critical section. */
\r
1876 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1877 prvAddTaskToReadyList( pxTCB );
\r
1879 /* A higher priority task may have just been resumed. */
\r
1880 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1882 /* This yield may not cause the task just resumed to run,
\r
1883 but will leave the lists in the correct state for the next yield. */
\r
1885 taskYIELD_IF_USING_PREEMPTION();
\r
1889 mtCOVERAGE_TEST_MARKER();
\r
1894 mtCOVERAGE_TEST_MARKER();
\r
1897 taskEXIT_CRITICAL();
\r
1901 mtCOVERAGE_TEST_MARKER();
\r
1905 #endif /* INCLUDE_vTaskSuspend */
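/* Illustrative usage sketch (application code, not part of the kernel).
xLoggerHandle is an assumed handle.  A controlling task can park another task
and later release it; a task can also suspend itself by passing NULL:

    void vPauseLogger( TaskHandle_t xLoggerHandle )
    {
        vTaskSuspend( xLoggerHandle );   // the logger stops being scheduled
        // ... work that must not interleave with logging ...
        vTaskResume( xLoggerHandle );    // the logger is ready to run again
    }
*/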
\r
1907 /*-----------------------------------------------------------*/
\r
1909 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
\r
1911 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
\r
1913 BaseType_t xYieldRequired = pdFALSE;
\r
1914 TCB_t * const pxTCB = xTaskToResume;
\r
1915 UBaseType_t uxSavedInterruptStatus;
\r
1917 configASSERT( xTaskToResume );
\r
1919 /* RTOS ports that support interrupt nesting have the concept of a
\r
1920 maximum system call (or maximum API call) interrupt priority.
\r
1921 Interrupts that are above the maximum system call priority are kept
\r
1922 permanently enabled, even when the RTOS kernel is in a critical section,
\r
1923 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
1924 is defined in FreeRTOSConfig.h then
\r
1925 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1926 failure if a FreeRTOS API function is called from an interrupt that has
\r
1927 been assigned a priority above the configured maximum system call
\r
1928 priority. Only FreeRTOS functions that end in FromISR can be called
\r
1929 from interrupts that have been assigned a priority at or (logically)
\r
1930 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
1931 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
1932 simple as possible. More information (albeit Cortex-M specific) is
\r
1933 provided on the following link:
\r
1934 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1935 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1937 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1939 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
\r
1941 traceTASK_RESUME_FROM_ISR( pxTCB );
\r
1943 /* Check the ready lists can be accessed. */
\r
1944 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
1946 /* Ready lists can be accessed so move the task from the
\r
1947 suspended list to the ready list directly. */
\r
1948 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
1950 xYieldRequired = pdTRUE;
\r
1954 mtCOVERAGE_TEST_MARKER();
\r
1957 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
1958 prvAddTaskToReadyList( pxTCB );
\r
1962 /* The delayed or ready lists cannot be accessed so the task
\r
1963 is held in the pending ready list until the scheduler is resumed. */
\r
1965 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
1970 mtCOVERAGE_TEST_MARKER();
\r
1973 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1975 return xYieldRequired;
\r
1978 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
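/* Illustrative usage sketch (application code, not part of the kernel).  The
handler name and xHandlerTask are assumptions, and most ports provide the
portYIELD_FROM_ISR() macro used here.  The return value reports whether the
resumed task should run before the interrupted task:

    static TaskHandle_t xHandlerTask = NULL;   // assumed, set when the task is created

    void vAnInterruptHandler( void )
    {
        BaseType_t xYieldRequired = xTaskResumeFromISR( xHandlerTask );

        portYIELD_FROM_ISR( xYieldRequired );   // request the switch if needed
    }
*/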
\r
1979 /*-----------------------------------------------------------*/
\r
1981 void vTaskStartScheduler( void )
\r
1983 BaseType_t xReturn;
\r
1985 /* Add the idle task at the lowest priority. */
\r
1986 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
\r
1988 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
\r
1989 StackType_t *pxIdleTaskStackBuffer = NULL;
\r
1990 uint32_t ulIdleTaskStackSize;
\r
1992 /* The Idle task is created using user provided RAM - obtain the
\r
1993 address of the RAM then create the idle task. */
\r
1994 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
\r
1995 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
\r
1996 configIDLE_TASK_NAME,
\r
1997 ulIdleTaskStackSize,
\r
1998 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
\r
1999 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
\r
2000 pxIdleTaskStackBuffer,
\r
2001 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
2003 if( xIdleTaskHandle != NULL )
\r
2014 /* The Idle task is being created using dynamically allocated RAM. */
\r
2015 xReturn = xTaskCreate( prvIdleTask,
\r
2016 configIDLE_TASK_NAME,
\r
2017 configMINIMAL_STACK_SIZE,
\r
2019 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
\r
2020 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
\r
2022 #endif /* configSUPPORT_STATIC_ALLOCATION */
\r
2024 #if ( configUSE_TIMERS == 1 )
\r
2026 if( xReturn == pdPASS )
\r
2028 xReturn = xTimerCreateTimerTask();
\r
2032 mtCOVERAGE_TEST_MARKER();
\r
2035 #endif /* configUSE_TIMERS */
\r
2037 if( xReturn == pdPASS )
\r
2039 /* freertos_tasks_c_additions_init() should only be called if the user
\r
2040 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
\r
2041 the only macro called by the function. */
\r
2042 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
\r
2044 freertos_tasks_c_additions_init();
\r
2048 /* Interrupts are turned off here, to ensure a tick does not occur
\r
2049 before or during the call to xPortStartScheduler(). The stacks of
\r
2050 the created tasks contain a status word with interrupts switched on
\r
2051 so interrupts will automatically get re-enabled when the first task starts to run. */
\r
2053 portDISABLE_INTERRUPTS();
\r
2055 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
2057 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
2058 structure specific to the task that will run first.
\r
2059 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
2060 for additional information. */
\r
2061 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
2063 #endif /* configUSE_NEWLIB_REENTRANT */
\r
2065 xNextTaskUnblockTime = portMAX_DELAY;
\r
2066 xSchedulerRunning = pdTRUE;
\r
2067 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
\r
2069 /* If configGENERATE_RUN_TIME_STATS is defined then the following
\r
2070 macro must be defined to configure the timer/counter used to generate
\r
2071 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
\r
2072 is set to 0 and the following line fails to build then ensure you do not
\r
2073 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
\r
2074 FreeRTOSConfig.h file. */
\r
2075 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
\r
2077 traceTASK_SWITCHED_IN();
\r
2079 /* Setting up the timer tick is hardware specific and thus in the
\r
2080 portable interface. */
\r
2081 if( xPortStartScheduler() != pdFALSE )
\r
2083 /* Should not reach here as if the scheduler is running the
\r
2084 function will not return. */
\r
2088 /* Should only reach here if a task calls xTaskEndScheduler(). */
\r
2093 /* This line will only be reached if the kernel could not be started,
\r
2094 because there was not enough FreeRTOS heap to create the idle task
\r
2095 or the timer task. */
\r
2096 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
\r
2099 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
\r
2100 meaning xIdleTaskHandle is not used anywhere else. */
\r
2101 ( void ) xIdleTaskHandle;
\r
2103 /*-----------------------------------------------------------*/
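/* Illustrative usage sketch (application code, not part of the kernel).
prvAppTask is an assumed function and dynamic allocation is assumed to be
enabled.  Typical start-up code creates at least one task and then hands
control to the scheduler; vTaskStartScheduler() only returns if there was not
enough heap for the idle or timer task:

    static void prvAppTask( void *pvParameters );

    int main( void )
    {
        xTaskCreate( prvAppTask, "App", configMINIMAL_STACK_SIZE, NULL,
                     tskIDLE_PRIORITY + 1, NULL );

        vTaskStartScheduler();

        for( ;; );   // should never be reached
    }
*/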
\r
2105 void vTaskEndScheduler( void )
\r
2107 /* Stop the scheduler interrupts and call the portable scheduler end
\r
2108 routine so the original ISRs can be restored if necessary. The port
\r
2109 layer must ensure interrupts enable bit is left in the correct state. */
\r
2110 portDISABLE_INTERRUPTS();
\r
2111 xSchedulerRunning = pdFALSE;
\r
2112 vPortEndScheduler();
\r
2114 /*----------------------------------------------------------*/
\r
2116 void vTaskSuspendAll( void )
\r
2118 /* A critical section is not required as the variable is of type
\r
2119 BaseType_t. Please read Richard Barry's reply in the following link to a
\r
2120 post in the FreeRTOS support forum before reporting this as a bug! -
\r
2121 http://goo.gl/wu4acr */
\r
2123 /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
\r
2124 do not otherwise exhibit real time behaviour. */
\r
2125 portSOFTWARE_BARRIER();
\r
2127 /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
\r
2128 is used to allow calls to vTaskSuspendAll() to nest. */
\r
2129 ++uxSchedulerSuspended;
\r
2131 /* Enforces ordering for ports and optimised compilers that may otherwise place
\r
2132 the above increment elsewhere. */
\r
2133 portMEMORY_BARRIER();
\r
2135 /*----------------------------------------------------------*/
\r
2137 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
2139 static TickType_t prvGetExpectedIdleTime( void )
\r
2141 TickType_t xReturn;
\r
2142 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
\r
2144 /* uxHigherPriorityReadyTasks takes care of the case where
\r
2145 configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
\r
2146 task that are in the Ready state, even though the idle task is running. */
\r
2148 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
\r
2150 if( uxTopReadyPriority > tskIDLE_PRIORITY )
\r
2152 uxHigherPriorityReadyTasks = pdTRUE;
\r
2157 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
\r
2159 /* When port optimised task selection is used the uxTopReadyPriority
\r
2160 variable is used as a bit map. If bits other than the least
\r
2161 significant bit are set then there are tasks that have a priority
\r
2162 above the idle priority that are in the Ready state. This takes
\r
2163 care of the case where the co-operative scheduler is in use. */
\r
2164 if( uxTopReadyPriority > uxLeastSignificantBit )
\r
2166 uxHigherPriorityReadyTasks = pdTRUE;
\r
2171 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
\r
2175 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
\r
2177 /* There are other idle priority tasks in the ready state. If
\r
2178 time slicing is used then the very next tick interrupt must be processed. */
\r
2182 else if( uxHigherPriorityReadyTasks != pdFALSE )
\r
2184 /* There are tasks in the Ready state that have a priority above the
\r
2185 idle priority. This path can only be reached if
\r
2186 configUSE_PREEMPTION is 0. */
\r
2191 xReturn = xNextTaskUnblockTime - xTickCount;
\r
2197 #endif /* configUSE_TICKLESS_IDLE */
\r
2198 /*----------------------------------------------------------*/
\r
2200 BaseType_t xTaskResumeAll( void )
\r
2202 TCB_t *pxTCB = NULL;
\r
2203 BaseType_t xAlreadyYielded = pdFALSE;
\r
2205 /* If uxSchedulerSuspended is zero then this function does not match a
\r
2206 previous call to vTaskSuspendAll(). */
\r
2207 configASSERT( uxSchedulerSuspended );
\r
2209 /* It is possible that an ISR caused a task to be removed from an event
\r
2210 list while the scheduler was suspended. If this was the case then the
\r
2211 removed task will have been added to the xPendingReadyList. Once the
\r
2212 scheduler has been resumed it is safe to move all the pending ready
\r
2213 tasks from this list into their appropriate ready list. */
\r
2214 taskENTER_CRITICAL();
\r
2216 --uxSchedulerSuspended;
\r
2218 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2220 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
\r
2222 /* Move any readied tasks from the pending list into the
\r
2223 appropriate ready list. */
\r
2224 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
\r
2226 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2227 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2228 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2229 prvAddTaskToReadyList( pxTCB );
\r
2231 /* If the moved task has a priority higher than the current
\r
2232 task then a yield must be performed. */
\r
2233 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2235 xYieldPending = pdTRUE;
\r
2239 mtCOVERAGE_TEST_MARKER();
\r
2243 if( pxTCB != NULL )
\r
2245 /* A task was unblocked while the scheduler was suspended,
\r
2246 which may have prevented the next unblock time from being
\r
2247 re-calculated, in which case re-calculate it now. Mainly
\r
2248 important for low power tickless implementations, where
\r
2249 this can prevent an unnecessary exit from a low power sleep state. */
\r
2251 prvResetNextTaskUnblockTime();
\r
2254 /* If any ticks occurred while the scheduler was suspended then
\r
2255 they should be processed now. This ensures the tick count does
\r
2256 not slip, and that any delayed tasks are resumed at the correct time. */
\r
2259 TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
\r
2261 if( xPendedCounts > ( TickType_t ) 0U )
\r
2265 if( xTaskIncrementTick() != pdFALSE )
\r
2267 xYieldPending = pdTRUE;
\r
2271 mtCOVERAGE_TEST_MARKER();
\r
2274 } while( xPendedCounts > ( TickType_t ) 0U );
\r
2280 mtCOVERAGE_TEST_MARKER();
\r
2284 if( xYieldPending != pdFALSE )
\r
2286 #if( configUSE_PREEMPTION != 0 )
\r
2288 xAlreadyYielded = pdTRUE;
\r
2291 taskYIELD_IF_USING_PREEMPTION();
\r
2295 mtCOVERAGE_TEST_MARKER();
\r
2301 mtCOVERAGE_TEST_MARKER();
\r
2304 taskEXIT_CRITICAL();
\r
2306 return xAlreadyYielded;
\r
2308 /*-----------------------------------------------------------*/
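/* Illustrative usage sketch (application code, not part of the kernel).
xSharedList and vProcessList() are assumed application-side names.  Suspending
the scheduler protects data shared between tasks without disabling interrupts,
but it does NOT protect data that is also accessed from interrupts:

    void vUpdateSharedList( void )
    {
        vTaskSuspendAll();
        {
            vProcessList( &xSharedList );   // no other task can run here
        }
        ( void ) xTaskResumeAll();   // returns pdTRUE if a context switch occurred
    }
*/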
\r
2310 TickType_t xTaskGetTickCount( void )
\r
2312 TickType_t xTicks;
\r
2314 /* Critical section required if running on a 16 bit processor. */
\r
2315 portTICK_TYPE_ENTER_CRITICAL();
\r
2317 xTicks = xTickCount;
\r
2319 portTICK_TYPE_EXIT_CRITICAL();
\r
2323 /*-----------------------------------------------------------*/
\r
2325 TickType_t xTaskGetTickCountFromISR( void )
\r
2327 TickType_t xReturn;
\r
2328 UBaseType_t uxSavedInterruptStatus;
\r
2330 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
2331 system call (or maximum API call) interrupt priority. Interrupts that are
\r
2332 above the maximum system call priority are kept permanently enabled, even
\r
2333 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
2334 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
2335 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
2336 failure if a FreeRTOS API function is called from an interrupt that has been
\r
2337 assigned a priority above the configured maximum system call priority.
\r
2338 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
2339 that have been assigned a priority at or (logically) below the maximum
\r
2340 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
2341 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
2342 More information (albeit Cortex-M specific) is provided on the following
\r
2343 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
2344 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
2346 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
\r
2348 xReturn = xTickCount;
\r
2350 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
2354 /*-----------------------------------------------------------*/
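/* Illustrative usage sketch (application code, not part of the kernel).
vDoWork() is an assumed function.  Elapsed time is measured in ticks, and
unsigned tick arithmetic handles the counter wrapping:

    void vTimeSomething( void )
    {
        const TickType_t xStart = xTaskGetTickCount();
        TickType_t xElapsed;

        vDoWork();

        xElapsed = xTaskGetTickCount() - xStart;

        if( xElapsed > pdMS_TO_TICKS( 100 ) )
        {
            // the work took longer than 100ms worth of ticks
        }
    }
*/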
\r
2356 UBaseType_t uxTaskGetNumberOfTasks( void )
\r
2358 /* A critical section is not required because the variables are of type BaseType_t. */
\r
2360 return uxCurrentNumberOfTasks;
\r
2362 /*-----------------------------------------------------------*/
\r
2364 char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
2368 /* If null is passed in here then the name of the calling task is being queried. */
\r
2370 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
\r
2371 configASSERT( pxTCB );
\r
2372 return &( pxTCB->pcTaskName[ 0 ] );
\r
2374 /*-----------------------------------------------------------*/
\r
2376 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
2378 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
\r
2380 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
\r
2383 BaseType_t xBreakLoop;
\r
2385 /* This function is called with the scheduler suspended. */
\r
2387 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
\r
2389 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2393 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2395 /* Check each character in the name looking for a match or mismatch. */
\r
2397 xBreakLoop = pdFALSE;
\r
2398 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
\r
2400 cNextChar = pxNextTCB->pcTaskName[ x ];
\r
2402 if( cNextChar != pcNameToQuery[ x ] )
\r
2404 /* Characters didn't match. */
\r
2405 xBreakLoop = pdTRUE;
\r
2407 else if( cNextChar == ( char ) 0x00 )
\r
2409 /* Both strings terminated, a match must have been found. */
\r
2411 pxReturn = pxNextTCB;
\r
2412 xBreakLoop = pdTRUE;
\r
2416 mtCOVERAGE_TEST_MARKER();
\r
2419 if( xBreakLoop != pdFALSE )
\r
2425 if( pxReturn != NULL )
\r
2427 /* The handle has been found. */
\r
2431 } while( pxNextTCB != pxFirstTCB );
\r
2435 mtCOVERAGE_TEST_MARKER();
\r
2441 #endif /* INCLUDE_xTaskGetHandle */
\r
2442 /*-----------------------------------------------------------*/
\r
2444 #if ( INCLUDE_xTaskGetHandle == 1 )
\r
2446 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
\r
2448 UBaseType_t uxQueue = configMAX_PRIORITIES;
\r
2451 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
\r
2452 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
\r
2454 vTaskSuspendAll();
\r
2456 /* Search the ready lists. */
\r
2460 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
\r
2462 if( pxTCB != NULL )
\r
2464 /* Found the handle. */
\r
2468 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2470 /* Search the delayed lists. */
\r
2471 if( pxTCB == NULL )
\r
2473 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
\r
2476 if( pxTCB == NULL )
\r
2478 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
\r
2481 #if ( INCLUDE_vTaskSuspend == 1 )
\r
2483 if( pxTCB == NULL )
\r
2485 /* Search the suspended list. */
\r
2486 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
\r
2491 #if( INCLUDE_vTaskDelete == 1 )
\r
2493 if( pxTCB == NULL )
\r
2495 /* Search the deleted list. */
\r
2496 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
\r
2501 ( void ) xTaskResumeAll();
\r
2506 #endif /* INCLUDE_xTaskGetHandle */
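/* Illustrative usage sketch (application code, not part of the kernel).  The
task name "Logger" is an assumption.  xTaskGetHandle() walks every task list
with the scheduler suspended, so it is best kept for infrequent, non
time-critical use such as debug console commands:

    void vNudgeLoggerByName( void )
    {
        TaskHandle_t xLogger = xTaskGetHandle( "Logger" );

        if( xLogger != NULL )
        {
            vTaskResume( xLogger );
        }
    }
*/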
\r
2507 /*-----------------------------------------------------------*/
\r
2509 #if ( configUSE_TRACE_FACILITY == 1 )
\r
2511 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
\r
2513 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
\r
2515 vTaskSuspendAll();
\r
2517 /* Is there a space in the array for each task in the system? */
\r
2518 if( uxArraySize >= uxCurrentNumberOfTasks )
\r
2520 /* Fill in a TaskStatus_t structure with information on each
\r
2521 task in the Ready state. */
\r
2525 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
\r
2527 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2529 /* Fill in a TaskStatus_t structure with information on each
\r
2530 task in the Blocked state. */
\r
2531 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
\r
2532 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
\r
2534 #if( INCLUDE_vTaskDelete == 1 )
\r
2536 /* Fill in a TaskStatus_t structure with information on
\r
2537 each task that has been deleted but not yet cleaned up. */
\r
2538 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
\r
2542 #if ( INCLUDE_vTaskSuspend == 1 )
\r
2544 /* Fill in a TaskStatus_t structure with information on
\r
2545 each task in the Suspended state. */
\r
2546 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
\r
2550 #if ( configGENERATE_RUN_TIME_STATS == 1)
\r
2552 if( pulTotalRunTime != NULL )
\r
2554 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
\r
2555 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
\r
2557 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
\r
2563 if( pulTotalRunTime != NULL )
\r
2565 *pulTotalRunTime = 0;
\r
2572 mtCOVERAGE_TEST_MARKER();
\r
2575 ( void ) xTaskResumeAll();
\r
2580 #endif /* configUSE_TRACE_FACILITY */
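/* Illustrative usage sketch (application code, not part of the kernel),
similar in spirit to the documented use of uxTaskGetSystemState().  One
TaskStatus_t is allocated per task, the snapshot is taken, then the array is
freed again:

    void vSnapshotTasks( void )
    {
        UBaseType_t uxCount = uxTaskGetNumberOfTasks();
        TaskStatus_t *pxStatusArray;
        uint32_t ulTotalRunTime;

        pxStatusArray = pvPortMalloc( uxCount * sizeof( TaskStatus_t ) );

        if( pxStatusArray != NULL )
        {
            uxCount = uxTaskGetSystemState( pxStatusArray, uxCount, &ulTotalRunTime );
            // ... inspect pxStatusArray[ 0 .. uxCount - 1 ] here ...
            vPortFree( pxStatusArray );
        }
    }
*/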
\r
2581 /*----------------------------------------------------------*/
\r
2583 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
\r
2585 TaskHandle_t xTaskGetIdleTaskHandle( void )
\r
2587 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
\r
2588 started, then xIdleTaskHandle will be NULL. */
\r
2589 configASSERT( ( xIdleTaskHandle != NULL ) );
\r
2590 return xIdleTaskHandle;
\r
2593 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
\r
2594 /*----------------------------------------------------------*/
\r
2596 /* This conditional compilation should use inequality to 0, not equality to 1.
\r
2597 This is to ensure vTaskStepTick() is available when user defined low power mode
\r
2598 implementations require configUSE_TICKLESS_IDLE to be set to a value other than 1. */
\r
2600 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
2602 void vTaskStepTick( const TickType_t xTicksToJump )
\r
2604 /* Correct the tick count value after a period during which the tick
\r
2605 was suppressed. Note this does *not* call the tick hook function for
\r
2606 each stepped tick. */
\r
2607 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
\r
2608 xTickCount += xTicksToJump;
\r
2609 traceINCREASE_TICK_COUNT( xTicksToJump );
\r
2612 #endif /* configUSE_TICKLESS_IDLE */
\r
2613 /*----------------------------------------------------------*/
\r
2615 BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
\r
2617 BaseType_t xYieldOccurred;
\r
2619 /* Must not be called with the scheduler suspended as the implementation
\r
2620 relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
\r
2621 configASSERT( uxSchedulerSuspended == 0 );
\r
2623 /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
\r
2624 the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
\r
2625 vTaskSuspendAll();
\r
2626 xPendedTicks += xTicksToCatchUp;
\r
2627 xYieldOccurred = xTaskResumeAll();
\r
2629 return xYieldOccurred;
\r
2631 /*----------------------------------------------------------*/
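/* Illustrative usage sketch (application code, not part of the kernel).
ulTicksMissed and the surrounding driver code are assumptions.  Typical use is
after the application has deliberately held off the tick interrupt, for
example around a slow flash write, so the kernel can account for the ticks
that were not processed:

    void vAfterLongTicklessRegion( uint32_t ulTicksMissed )
    {
        ( void ) xTaskCatchUpTicks( ( TickType_t ) ulTicksMissed );
    }
*/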
\r
2633 #if ( INCLUDE_xTaskAbortDelay == 1 )
\r
2635 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
\r
2637 TCB_t *pxTCB = xTask;
\r
2638 BaseType_t xReturn;
\r
2640 configASSERT( pxTCB );
\r
2642 vTaskSuspendAll();
\r
2644 /* A task can only be prematurely removed from the Blocked state if
\r
2645 it is actually in the Blocked state. */
\r
2646 if( eTaskGetState( xTask ) == eBlocked )
\r
2650 /* Remove the reference to the task from the blocked list. An
\r
2651 interrupt won't touch the xStateListItem because the
\r
2652 scheduler is suspended. */
\r
2653 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2655 /* Is the task waiting on an event also? If so remove it from
\r
2656 the event list too. Interrupts can touch the event list item,
\r
2657 even though the scheduler is suspended, so a critical section is used. */
\r
2659 taskENTER_CRITICAL();
\r
2661 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2663 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2665 /* This lets the task know it was forcibly removed from the
\r
2666 blocked state so it should not re-evaluate its block time and
\r
2667 then block again. */
\r
2668 pxTCB->ucDelayAborted = pdTRUE;
\r
2672 mtCOVERAGE_TEST_MARKER();
\r
2675 taskEXIT_CRITICAL();
\r
2677 /* Place the unblocked task into the appropriate ready list. */
\r
2678 prvAddTaskToReadyList( pxTCB );
\r
2680 /* A task being unblocked cannot cause an immediate context
\r
2681 switch if preemption is turned off. */
\r
2682 #if ( configUSE_PREEMPTION == 1 )
\r
2684 /* Preemption is on, but a context switch should only be
\r
2685 performed if the unblocked task has a priority that is
\r
2686 equal to or higher than the currently executing task. */
\r
2687 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
2689 /* Pend the yield to be performed when the scheduler
\r
2690 is unsuspended. */
\r
2691 xYieldPending = pdTRUE;
\r
2695 mtCOVERAGE_TEST_MARKER();
\r
2698 #endif /* configUSE_PREEMPTION */
\r
2705 ( void ) xTaskResumeAll();
\r
2710 #endif /* INCLUDE_xTaskAbortDelay */
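/* Illustrative usage sketch (application code, not part of the kernel).
xWaitingTask is an assumed handle.  If the named task is currently Blocked its
delay is cut short and the blocking call it was inside returns as if it had
timed out:

    void vWakeEarly( TaskHandle_t xWaitingTask )
    {
        if( xTaskAbortDelay( xWaitingTask ) == pdPASS )
        {
            // the task was Blocked and has now been made Ready
        }
    }
*/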
\r
2711 /*----------------------------------------------------------*/
\r
2713 BaseType_t xTaskIncrementTick( void )
\r
2716 TickType_t xItemValue;
\r
2717 BaseType_t xSwitchRequired = pdFALSE;
\r
2719 /* Called by the portable layer each time a tick interrupt occurs.
\r
2720 Increments the tick then checks to see if the new tick value will cause any
\r
2721 tasks to be unblocked. */
\r
2722 traceTASK_INCREMENT_TICK( xTickCount );
\r
2723 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
2725 /* Minor optimisation. The tick count cannot change in this block. */
\r
2727 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
\r
2729 /* Increment the RTOS tick, switching the delayed and overflowed
\r
2730 delayed lists if it wraps to 0. */
\r
2731 xTickCount = xConstTickCount;
\r
2733 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
\r
2735 taskSWITCH_DELAYED_LISTS();
\r
2739 mtCOVERAGE_TEST_MARKER();
\r
2742 /* See if this tick has made a timeout expire. Tasks are stored in
\r
2743 the queue in the order of their wake time - meaning once one task
\r
2744 has been found whose block time has not expired there is no need to
\r
2745 look any further down the list. */
\r
2746 if( xConstTickCount >= xNextTaskUnblockTime )
\r
2750 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
2752 /* The delayed list is empty. Set xNextTaskUnblockTime
\r
2753 to the maximum possible value so it is extremely unlikely that the
\r
2755 if( xTickCount >= xNextTaskUnblockTime ) test will pass
\r
2756 next time through. */
\r
2757 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
2762 /* The delayed list is not empty, get the value of the
\r
2763 item at the head of the delayed list. This is the time
\r
2764 at which the task at the head of the delayed list must
\r
2765 be removed from the Blocked state. */
\r
2766 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
2767 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
\r
2769 if( xConstTickCount < xItemValue )
\r
2771 /* It is not time to unblock this item yet, but the
\r
2772 item value is the time at which the task at the head
\r
2773 of the blocked list must be removed from the Blocked
\r
2774 state - so record the item value in
\r
2775 xNextTaskUnblockTime. */
\r
2776 xNextTaskUnblockTime = xItemValue;
\r
2777 break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
\r
2781 mtCOVERAGE_TEST_MARKER();
\r
2784 /* It is time to remove the item from the Blocked state. */
\r
2785 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
2787 /* Is the task waiting on an event also? If so remove
\r
2788 it from the event list. */
\r
2789 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
2791 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
\r
2795 mtCOVERAGE_TEST_MARKER();
\r
2798 /* Place the unblocked task into the appropriate ready list. */
\r
2800 prvAddTaskToReadyList( pxTCB );
\r
2802 /* A task being unblocked cannot cause an immediate
\r
2803 context switch if preemption is turned off. */
\r
2804 #if ( configUSE_PREEMPTION == 1 )
\r
2806 /* Preemption is on, but a context switch should
\r
2807 only be performed if the unblocked task has a
\r
2808 priority that is equal to or higher than the
\r
2809 currently executing task. */
\r
2810 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
\r
2812 xSwitchRequired = pdTRUE;
\r
2816 mtCOVERAGE_TEST_MARKER();
\r
2819 #endif /* configUSE_PREEMPTION */
\r
2824 /* Tasks of equal priority to the currently running task will share
\r
2825 processing time (time slice) if preemption is on, and the application
\r
2826 writer has not explicitly turned time slicing off. */
\r
2827 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
\r
2829 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
\r
2831 xSwitchRequired = pdTRUE;
\r
2835 mtCOVERAGE_TEST_MARKER();
\r
2838 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
\r
2840 #if ( configUSE_TICK_HOOK == 1 )
\r
2842 /* Guard against the tick hook being called when the pended tick
\r
2843 count is being unwound (when the scheduler is being unlocked). */
\r
2844 if( xPendedTicks == ( TickType_t ) 0 )
\r
2846 vApplicationTickHook();
\r
2850 mtCOVERAGE_TEST_MARKER();
\r
2853 #endif /* configUSE_TICK_HOOK */
\r
2855 #if ( configUSE_PREEMPTION == 1 )
\r
2857 if( xYieldPending != pdFALSE )
\r
2859 xSwitchRequired = pdTRUE;
\r
2863 mtCOVERAGE_TEST_MARKER();
\r
2866 #endif /* configUSE_PREEMPTION */
\r
2872 /* The tick hook gets called at regular intervals, even if the
\r
2873 scheduler is locked. */
\r
2874 #if ( configUSE_TICK_HOOK == 1 )
\r
2876 vApplicationTickHook();
\r
2881 return xSwitchRequired;
\r
2883 /*-----------------------------------------------------------*/
\r
2885 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
2887 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
\r
2891 /* If xTask is NULL then it is the task hook of the calling task that is being set. */
\r
2893 if( xTask == NULL )
\r
2895 xTCB = ( TCB_t * ) pxCurrentTCB;
\r
2902 /* Save the hook function in the TCB. A critical section is required as
\r
2903 the value can be accessed from an interrupt. */
\r
2904 taskENTER_CRITICAL();
\r
2906 xTCB->pxTaskTag = pxHookFunction;
\r
2908 taskEXIT_CRITICAL();
\r
2911 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
2912 /*-----------------------------------------------------------*/
\r
2914 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
2916 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
\r
2919 TaskHookFunction_t xReturn;
\r
2921 /* If xTask is NULL then the hook of the calling task is returned. */
\r
2922 pxTCB = prvGetTCBFromHandle( xTask );
\r
2924 /* Read the hook function from the TCB. A critical section is required as
\r
2925 the value can be accessed from an interrupt. */
\r
2926 taskENTER_CRITICAL();
\r
2928 xReturn = pxTCB->pxTaskTag;
\r
2930 taskEXIT_CRITICAL();
\r
2935 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
2936 /*-----------------------------------------------------------*/
\r
2938 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
2940 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
\r
2943 TaskHookFunction_t xReturn;
\r
2944 UBaseType_t uxSavedInterruptStatus;
\r
2946 /* If xTask is NULL then the hook of the calling task is returned. */
\r
2947 pxTCB = prvGetTCBFromHandle( xTask );
\r
2949 /* Read the hook function from the TCB. A critical section is required as
\r
2950 the value can be accessed from an interrupt. */
\r
2951 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
2953 xReturn = pxTCB->pxTaskTag;
\r
2955 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
2960 #endif /* configUSE_APPLICATION_TASK_TAG */
\r
2961 /*-----------------------------------------------------------*/
\r
2963 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
\r
2965 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
\r
2968 BaseType_t xReturn;
\r
2970 /* If xTask is NULL then we are calling our own task hook. */
\r
2971 if( xTask == NULL )
\r
2973 xTCB = pxCurrentTCB;
\r
2980 if( xTCB->pxTaskTag != NULL )
\r
2982 xReturn = xTCB->pxTaskTag( pvParameter );
\r
2992 #endif /* configUSE_APPLICATION_TASK_TAG */
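/* Illustrative usage sketch (application code, not part of the kernel,
requires configUSE_APPLICATION_TASK_TAG set to 1).  prvTaskHook is an assumed
function.  A tag can hold an arbitrary value or, as here, a hook function that
is later invoked through xTaskCallApplicationTaskHook():

    static BaseType_t prvTaskHook( void *pvParameter )
    {
        ( void ) pvParameter;
        return pdPASS;   // returned to the caller of the hook
    }

    void vInstallHookOnSelf( void )
    {
        vTaskSetApplicationTaskTag( NULL, prvTaskHook );
        ( void ) xTaskCallApplicationTaskHook( NULL, NULL );
    }
*/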
\r
2993 /*-----------------------------------------------------------*/
\r
2995 void vTaskSwitchContext( void )
\r
2997 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
\r
2999 /* The scheduler is currently suspended - do not allow a context switch. */
\r
3001 xYieldPending = pdTRUE;
\r
3005 xYieldPending = pdFALSE;
\r
3006 traceTASK_SWITCHED_OUT();
\r
3008 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
3010 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
\r
3011 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
\r
3013 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
\r
3016 /* Add the amount of time the task has been running to the
\r
3017 accumulated time so far. The time the task started running was
\r
3018 stored in ulTaskSwitchedInTime. Note that there is no overflow
\r
3019 protection here so count values are only valid until the timer
\r
3020 overflows. The guard against negative values is to protect
\r
3021 against suspect run time stat counter implementations - which
\r
3022 are provided by the application, not the kernel. */
\r
3023 if( ulTotalRunTime > ulTaskSwitchedInTime )
\r
3025 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
\r
3029 mtCOVERAGE_TEST_MARKER();
\r
3031 ulTaskSwitchedInTime = ulTotalRunTime;
\r
3033 #endif /* configGENERATE_RUN_TIME_STATS */
\r
3035 /* Check for stack overflow, if configured. */
\r
3036 taskCHECK_FOR_STACK_OVERFLOW();
\r
3038 /* Before the currently running task is switched out, save its errno. */
\r
3039 #if( configUSE_POSIX_ERRNO == 1 )
\r
3041 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
\r
3045 /* Select a new task to run using either the generic C or port
\r
3046 optimised asm code. */
\r
3047 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3048 traceTASK_SWITCHED_IN();
\r
3050 /* After the new task is switched in, update the global errno. */
\r
3051 #if( configUSE_POSIX_ERRNO == 1 )
\r
3053 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
\r
3057 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
3059 /* Switch Newlib's _impure_ptr variable to point to the _reent
\r
3060 structure specific to this task.
\r
3061 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
3062 for additional information. */
\r
3063 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
\r
3065 #endif /* configUSE_NEWLIB_REENTRANT */
\r
3068 /*-----------------------------------------------------------*/
\r
3070 void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
\r
3072 configASSERT( pxEventList );
\r
3074 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
\r
3075 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
\r
3077 /* Place the event list item of the TCB in the appropriate event list.
\r
3078 This is placed in the list in priority order so the highest priority task
\r
3079 is the first to be woken by the event. The queue that contains the event
\r
3080 list is locked, preventing simultaneous access from interrupts. */
\r
3081 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
3083 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
3085 /*-----------------------------------------------------------*/
\r
3087 void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
\r
3089 configASSERT( pxEventList );
\r
3091 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
3092 the event groups implementation. */
\r
3093 configASSERT( uxSchedulerSuspended != 0 );
\r
3095 /* Store the item value in the event list item. It is safe to access the
\r
3096 event list item here as interrupts won't access the event list item of a
\r
3097 task that is not in the Blocked state. */
\r
3098 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
3100 /* Place the event list item of the TCB at the end of the appropriate event
\r
3101 list. It is safe to access the event list here because it is part of an
\r
3102 event group implementation - and interrupts don't access event groups
\r
3103 directly (instead they access them indirectly by pending function calls to
\r
3104 the task level). */
\r
3105 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
3107 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
3109 /*-----------------------------------------------------------*/
\r
3111 #if( configUSE_TIMERS == 1 )
\r
3113 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
\r
3115 configASSERT( pxEventList );
\r
3117 /* This function should not be called by application code hence the
\r
3118 'Restricted' in its name. It is not part of the public API. It is
\r
3119 designed for use by kernel code, and has special calling requirements -
\r
3120 it should be called with the scheduler suspended. */
\r
3123 /* Place the event list item of the TCB in the appropriate event list.
\r
3124 In this case it is assumed that this is the only task that is going to
\r
3125 be waiting on this event list, so the faster vListInsertEnd() function
\r
3126 can be used in place of vListInsert. */
\r
3127 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
\r
3129 /* If the task should block indefinitely then set the block time to a
\r
3130 value that will be recognised as an indefinite delay inside the
\r
3131 prvAddCurrentTaskToDelayedList() function. */
\r
3132 if( xWaitIndefinitely != pdFALSE )
\r
3134 xTicksToWait = portMAX_DELAY;
\r
3137 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
\r
3138 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
\r
3141 #endif /* configUSE_TIMERS */
\r
3142 /*-----------------------------------------------------------*/
\r
3144 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
\r
3146 TCB_t *pxUnblockedTCB;
\r
3147 BaseType_t xReturn;
\r
3149 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
\r
3150 called from a critical section within an ISR. */
\r
3152 /* The event list is sorted in priority order, so the first in the list can
\r
3153 be removed as it is known to be the highest priority. Remove the TCB from
\r
3154 the delayed list, and add it to the ready list.
\r
3156 If an event is for a queue that is locked then this function will never
\r
3157 get called - the lock count on the queue will get modified instead. This
\r
3158 means exclusive access to the event list is guaranteed here.
\r
3160 This function assumes that a check has already been made to ensure that
\r
3161 pxEventList is not empty. */
\r
3162 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3163 configASSERT( pxUnblockedTCB );
\r
3164 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
\r
3166 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
3168 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3169 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3171 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3173 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
\r
3174 might be set to the blocked task's time out time. If the task is
\r
3175 unblocked for a reason other than a timeout xNextTaskUnblockTime is
\r
3176 normally left unchanged, because it is automatically reset to a new
\r
3177 value when the tick count equals xNextTaskUnblockTime. However if
\r
3178 tickless idling is used it might be more important to enter sleep mode
\r
3179 at the earliest possible time - so reset xNextTaskUnblockTime here to
\r
3180 ensure it is updated at the earliest possible time. */
\r
3181 prvResetNextTaskUnblockTime();
\r
3187 /* The delayed and ready lists cannot be accessed, so hold this task
\r
3188 pending until the scheduler is resumed. */
\r
3189 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
\r
3192 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3194 /* Return true if the task removed from the event list has a higher
\r
3195 priority than the calling task. This allows the calling task to know if
\r
3196 it should force a context switch now. */
\r
3199 /* Mark that a yield is pending in case the user is not using the
\r
3200 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
\r
3201 xYieldPending = pdTRUE;
\r
3205 xReturn = pdFALSE;
\r
3210 /*-----------------------------------------------------------*/
\r
3212 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
\r
3214 TCB_t *pxUnblockedTCB;
\r
3216 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
\r
3217 the event flags implementation. */
\r
3218 configASSERT( uxSchedulerSuspended != pdFALSE );
\r
3220 /* Store the new item value in the event list. */
\r
3221 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
\r
3223 /* Remove the event list item from the event flag. Interrupts do not access event flags. */
\r
3225 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3226 configASSERT( pxUnblockedTCB );
\r
3227 ( void ) uxListRemove( pxEventListItem );
\r
3229 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3231 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
\r
3232 might be set to the blocked task's time out time. If the task is
\r
3233 unblocked for a reason other than a timeout xNextTaskUnblockTime is
\r
3234 normally left unchanged, because it is automatically reset to a new
\r
3235 value when the tick count equals xNextTaskUnblockTime. However if
\r
3236 tickless idling is used it might be more important to enter sleep mode
\r
3237 at the earliest possible time - so reset xNextTaskUnblockTime here to
\r
3238 ensure it is updated at the earliest possible time. */
\r
3239 prvResetNextTaskUnblockTime();
\r
3243 /* Remove the task from the delayed list and add it to the ready list. The
\r
3244 scheduler is suspended so interrupts will not be accessing the ready lists. */
\r
3246 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
\r
3247 prvAddTaskToReadyList( pxUnblockedTCB );
\r
3249 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
3251 /* The unblocked task has a priority above that of the calling task, so
\r
3252 a context switch is required. This function is called with the
\r
3253 scheduler suspended so xYieldPending is set so the context switch
\r
3254 occurs immediately that the scheduler is resumed (unsuspended). */
\r
3255 xYieldPending = pdTRUE;
\r
3258 /*-----------------------------------------------------------*/
\r
3260 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3262 configASSERT( pxTimeOut );
\r
3263 taskENTER_CRITICAL();
\r
3265 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3266 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3268 taskEXIT_CRITICAL();
\r
3270 /*-----------------------------------------------------------*/
\r
3272 void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
\r
3274 /* For internal use only as it does not use a critical section. */
\r
3275 pxTimeOut->xOverflowCount = xNumOfOverflows;
\r
3276 pxTimeOut->xTimeOnEntering = xTickCount;
\r
3278 /*-----------------------------------------------------------*/
\r
3280 BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
\r
3282 BaseType_t xReturn;
\r
3284 configASSERT( pxTimeOut );
\r
3285 configASSERT( pxTicksToWait );
\r
3287 taskENTER_CRITICAL();
\r
3289 /* Minor optimisation. The tick count cannot change in this block. */
\r
3290 const TickType_t xConstTickCount = xTickCount;
\r
3291 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
\r
3293 #if( INCLUDE_xTaskAbortDelay == 1 )
\r
3294 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
\r
3296 /* The delay was aborted, which is not the same as a time out,
\r
3297 but has the same result. */
\r
3298 pxCurrentTCB->ucDelayAborted = pdFALSE;
\r
3304 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3305 if( *pxTicksToWait == portMAX_DELAY )
\r
3307 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
\r
3308 specified is the maximum block time then the task should block
\r
3309 indefinitely, and therefore never time out. */
\r
3310 xReturn = pdFALSE;
\r
3315 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
\r
3317 /* The tick count is greater than the time at which
\r
3318 vTaskSetTimeout() was called, but has also overflowed since
\r
3319 vTaskSetTimeOut() was called. It must have wrapped all the way
\r
3320 around and gone past again, so more time than the block time has passed since vTaskSetTimeOut() was called. */
\r
3324 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
\r
3326 /* Not a genuine timeout. Adjust parameters for time remaining. */
\r
3327 *pxTicksToWait -= xElapsedTime;
\r
3328 vTaskInternalSetTimeOutState( pxTimeOut );
\r
3329 xReturn = pdFALSE;
\r
3333 *pxTicksToWait = 0;
\r
3337 taskEXIT_CRITICAL();
\r
3341 /*-----------------------------------------------------------*/
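/* Illustrative usage sketch (application code, not part of the kernel),
following the usual xTaskCheckForTimeOut() pattern.  The receive routine and
the 100ms figure are assumptions.  The timeout state is captured once, then
xTaskCheckForTimeOut() winds the remaining block time down each time the task
unblocks before all the requested data has arrived:

    size_t xReceiveBytes( uint8_t *pucBuffer, size_t xWanted )
    {
        TimeOut_t xTimeOut;
        TickType_t xTicksToWait = pdMS_TO_TICKS( 100 );
        size_t xReceived = 0;

        vTaskSetTimeOutState( &xTimeOut );

        while( xReceived < xWanted )
        {
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
            {
                break;   // the overall timeout has expired
            }

            // Block for up to xTicksToWait ticks for more data, for example
            // on a queue or stream buffer, adding whatever arrives to
            // pucBuffer and xReceived (assumed application-specific code).
        }

        return xReceived;
    }
*/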
\r
3343 void vTaskMissedYield( void )
\r
3345 xYieldPending = pdTRUE;
\r
3347 /*-----------------------------------------------------------*/
\r
3349 #if ( configUSE_TRACE_FACILITY == 1 )
\r
3351 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
\r
3353 UBaseType_t uxReturn;
\r
3354 TCB_t const *pxTCB;
\r
3356 if( xTask != NULL )
\r
3359 uxReturn = pxTCB->uxTaskNumber;
\r
3369 #endif /* configUSE_TRACE_FACILITY */
\r
3370 /*-----------------------------------------------------------*/
\r
3372 #if ( configUSE_TRACE_FACILITY == 1 )
\r
3374 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
\r
3378 if( xTask != NULL )
\r
3381 pxTCB->uxTaskNumber = uxHandle;
\r
3385 #endif /* configUSE_TRACE_FACILITY */
\r
3388 * -----------------------------------------------------------
\r
3389 * The Idle task.
\r
3390 * ----------------------------------------------------------
\r
3392 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
\r
3393 * language extensions. The equivalent prototype for this function is:
\r
3395 * void prvIdleTask( void *pvParameters );
\r
3398 static portTASK_FUNCTION( prvIdleTask, pvParameters )
\r
3400 /* Stop warnings. */
\r
3401 ( void ) pvParameters;
\r
3403 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
\r
3404 SCHEDULER IS STARTED. **/
\r
3406 /* In case a task that has a secure context deletes itself, in which case
\r
3407 the idle task is responsible for deleting the task's secure context, if any. */
\r
3409 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
\r
3413 /* See if any tasks have deleted themselves - if so then the idle task
\r
3414 is responsible for freeing the deleted task's TCB and stack. */
\r
3415 prvCheckTasksWaitingTermination();
\r
3417 #if ( configUSE_PREEMPTION == 0 )
\r
3419 /* If we are not using preemption we keep forcing a task switch to
\r
3420 see if any other task has become available. If we are using
\r
3421 preemption we don't need to do this as any task becoming available
\r
3422 will automatically get the processor anyway. */
\r
3425 #endif /* configUSE_PREEMPTION */
\r
3427 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
\r
3429 /* When using preemption tasks of equal priority will be
\r
3430 timesliced. If a task that is sharing the idle priority is ready
\r
3431 to run then the idle task should yield before the end of the timeslice.
\r
3434 A critical region is not required here as we are just reading from
\r
3435 the list, and an occasional incorrect value will not matter. If
\r
3436 the ready list at the idle priority contains more than one task
\r
3437 then a task other than the idle task is ready to execute. */
\r
3438 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
\r
3444 mtCOVERAGE_TEST_MARKER();
\r
3447 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
\r
3449 #if ( configUSE_IDLE_HOOK == 1 )
\r
3451 extern void vApplicationIdleHook( void );
\r
3453 /* Call the user defined function from within the idle task. This
\r
3454 allows the application designer to add background functionality
\r
3455 without the overhead of a separate task.
\r
3456 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
\r
3457 CALL A FUNCTION THAT MIGHT BLOCK. */
\r
3458 vApplicationIdleHook();
\r
3460 #endif /* configUSE_IDLE_HOOK */
\r
3462 /* This conditional compilation should use inequality to 0, not equality
\r
3463 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
\r
3464 user defined low power mode implementations require
\r
3465 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
\r
3466 #if ( configUSE_TICKLESS_IDLE != 0 )
\r
3468 TickType_t xExpectedIdleTime;
\r
3470 /* It is not desirable to suspend then resume the scheduler on
\r
3471 each iteration of the idle task. Therefore, a preliminary
\r
3472 test of the expected idle time is performed without the
\r
3473 scheduler suspended. The result here is not necessarily valid.
\r
3475 xExpectedIdleTime = prvGetExpectedIdleTime();
\r
3477 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
\r
3479 vTaskSuspendAll();
\r
3481 /* Now the scheduler is suspended, the expected idle
\r
3482 time can be sampled again, and this time its value can be used.
\r
3484 configASSERT( xNextTaskUnblockTime >= xTickCount );
\r
3485 xExpectedIdleTime = prvGetExpectedIdleTime();
\r
3487 /* Define the following macro to set xExpectedIdleTime to 0
\r
3488 if the application does not want
\r
3489 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
\r
3490 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
\r
3492 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
\r
3494 traceLOW_POWER_IDLE_BEGIN();
\r
3495 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
\r
3496 traceLOW_POWER_IDLE_END();
\r
3500 mtCOVERAGE_TEST_MARKER();
\r
3503 ( void ) xTaskResumeAll();
\r
3507 mtCOVERAGE_TEST_MARKER();
\r
3510 #endif /* configUSE_TICKLESS_IDLE */
\r
3513 /*-----------------------------------------------------------*/
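/* Illustrative sketch, not part of the kernel: when configUSE_IDLE_HOOK is 1
the application must supply vApplicationIdleHook().  It is called from the
idle task on every pass through the idle loop and must never block. */
#if 0 /* Example only - excluded from the build. */
	void vApplicationIdleHook( void )
	{
		/* Trivial background work only - for example, kick a watchdog or
		drop the MCU into a light sleep until the next interrupt. */
	}
#endif /* Example only. */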
\r
3515 #if( configUSE_TICKLESS_IDLE != 0 )
\r
3517 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
\r
3519 /* The idle task exists in addition to the application tasks. */
\r
3520 const UBaseType_t uxNonApplicationTasks = 1;
\r
3521 eSleepModeStatus eReturn = eStandardSleep;
\r
3523 /* This function must be called from a critical section. */
\r
3525 if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
\r
3527 /* A task was made ready while the scheduler was suspended. */
\r
3528 eReturn = eAbortSleep;
\r
3530 else if( xYieldPending != pdFALSE )
\r
3532 /* A yield was pended while the scheduler was suspended. */
\r
3533 eReturn = eAbortSleep;
\r
3535 else if( xPendedTicks != 0 )
\r
3537 /* A tick interrupt has already occurred but was held pending
\r
3538 because the scheduler is suspended. */
\r
3539 eReturn = eAbortSleep;
\r
3543 /* If all the tasks are in the suspended list (which might mean they
\r
3544 have an infinite block time rather than actually being suspended)
\r
3545 then it is safe to turn all clocks off and just wait for external interrupts. */
\r
3547 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
\r
3549 eReturn = eNoTasksWaitingTimeout;
\r
3553 mtCOVERAGE_TEST_MARKER();
\r
3560 #endif /* configUSE_TICKLESS_IDLE */
\r
3561 /*-----------------------------------------------------------*/
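/* Illustrative sketch, not part of the kernel: a port's
portSUPPRESS_TICKS_AND_SLEEP() implementation calls
eTaskConfirmSleepModeStatus() with interrupts masked to check that it is still
safe to enter a low power state.  The interrupt masking calls below are
hypothetical placeholders for port specific code. */
#if 0 /* Example only - excluded from the build. */
	void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
	{
		prvMaskInterrupts(); /* Hypothetical - port specific. */

		if( eTaskConfirmSleepModeStatus() == eAbortSleep )
		{
			/* A task became ready or a tick is pending - abandon the sleep. */
			prvUnmaskInterrupts(); /* Hypothetical - port specific. */
		}
		else
		{
			/* Stop the tick, sleep for up to xExpectedIdleTime ticks, then
			restart the tick and report how long was actually slept by
			calling vTaskStepTick() before unmasking interrupts. */
		}
	}
#endif /* Example only. */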
\r
3563 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
\r
3565 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
\r
3569 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
\r
3571 pxTCB = prvGetTCBFromHandle( xTaskToSet );
\r
3572 configASSERT( pxTCB != NULL );
\r
3573 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
\r
3577 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3578 /*-----------------------------------------------------------*/
\r
3580 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
\r
3582 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
\r
3584 void *pvReturn = NULL;
\r
3587 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
\r
3589 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
\r
3590 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
\r
3600 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
\r
3601 /*-----------------------------------------------------------*/
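/* Illustrative usage sketch, not part of the kernel: each task owns
configNUM_THREAD_LOCAL_STORAGE_POINTERS void pointers for per-task data.  The
index and structure below are arbitrary examples. */
#if 0 /* Example only - excluded from the build. */
	typedef struct { int iLastError; } TaskContext_t;

	static TaskContext_t xContextForThisTask;

	void vThreadLocalStorageExample( void )
	{
	TaskContext_t *pxContext;

		/* Store a pointer in index 0 of the calling task (NULL handle). */
		vTaskSetThreadLocalStoragePointer( NULL, 0, &xContextForThisTask );

		/* Retrieve it later, again for the calling task. */
		pxContext = ( TaskContext_t * ) pvTaskGetThreadLocalStoragePointer( NULL, 0 );
		pxContext->iLastError = 0;
	}
#endif /* Example only. */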
\r
3603 #if ( portUSING_MPU_WRAPPERS == 1 )
\r
3605 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
\r
3609 /* If null is passed in here then we are modifying the MPU settings of
\r
3610 the calling task. */
\r
3611 pxTCB = prvGetTCBFromHandle( xTaskToModify );
\r
3613 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
\r
3616 #endif /* portUSING_MPU_WRAPPERS */
\r
3617 /*-----------------------------------------------------------*/
\r
3619 static void prvInitialiseTaskLists( void )
\r
3621 UBaseType_t uxPriority;
\r
3623 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
\r
3625 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
\r
3628 vListInitialise( &xDelayedTaskList1 );
\r
3629 vListInitialise( &xDelayedTaskList2 );
\r
3630 vListInitialise( &xPendingReadyList );
\r
3632 #if ( INCLUDE_vTaskDelete == 1 )
\r
3634 vListInitialise( &xTasksWaitingTermination );
\r
3636 #endif /* INCLUDE_vTaskDelete */
\r
3638 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3640 vListInitialise( &xSuspendedTaskList );
\r
3642 #endif /* INCLUDE_vTaskSuspend */
\r
3644 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList using list2. */
\r
3646 pxDelayedTaskList = &xDelayedTaskList1;
\r
3647 pxOverflowDelayedTaskList = &xDelayedTaskList2;
\r
3649 /*-----------------------------------------------------------*/
\r
3651 static void prvCheckTasksWaitingTermination( void )
\r
3654 /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
\r
3656 #if ( INCLUDE_vTaskDelete == 1 )
\r
3660 /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
\r
3661 being called too often in the idle task. */
\r
3662 while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
\r
3664 taskENTER_CRITICAL();
\r
3666 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3667 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
3668 --uxCurrentNumberOfTasks;
\r
3669 --uxDeletedTasksWaitingCleanUp;
\r
3671 taskEXIT_CRITICAL();
\r
3673 prvDeleteTCB( pxTCB );
\r
3676 #endif /* INCLUDE_vTaskDelete */
\r
3678 /*-----------------------------------------------------------*/
\r
3680 #if( configUSE_TRACE_FACILITY == 1 )
\r
3682 void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
\r
3686 /* If xTask is NULL then get the state of the calling task. */
\r
3687 pxTCB = prvGetTCBFromHandle( xTask );
\r
3689 pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
\r
3690 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
\r
3691 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
\r
3692 pxTaskStatus->pxStackBase = pxTCB->pxStack;
\r
3693 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
\r
3695 #if ( configUSE_MUTEXES == 1 )
\r
3697 pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
\r
3701 pxTaskStatus->uxBasePriority = 0;
\r
3705 #if ( configGENERATE_RUN_TIME_STATS == 1 )
\r
3707 pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
\r
3711 pxTaskStatus->ulRunTimeCounter = 0;
\r
3715 /* Obtaining the task state is a little fiddly, so is only done if the
\r
3716 value of eState passed into this function is eInvalid - otherwise the
\r
3717 state is just set to whatever is passed in. */
\r
3718 if( eState != eInvalid )
\r
3720 if( pxTCB == pxCurrentTCB )
\r
3722 pxTaskStatus->eCurrentState = eRunning;
\r
3726 pxTaskStatus->eCurrentState = eState;
\r
3728 #if ( INCLUDE_vTaskSuspend == 1 )
\r
3730 /* If the task is in the suspended list then there is a
\r
3731 chance it is actually just blocked indefinitely - so really
\r
3732 it should be reported as being in the Blocked state. */
\r
3733 if( eState == eSuspended )
\r
3735 vTaskSuspendAll();
\r
3737 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
\r
3739 pxTaskStatus->eCurrentState = eBlocked;
\r
3742 ( void ) xTaskResumeAll();
\r
3745 #endif /* INCLUDE_vTaskSuspend */
\r
3750 pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
\r
3753 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
\r
3754 parameter is provided to allow it to be skipped. */
\r
3755 if( xGetFreeStackSpace != pdFALSE )
\r
3757 #if ( portSTACK_GROWTH > 0 )
\r
3759 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
\r
3763 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
\r
3769 pxTaskStatus->usStackHighWaterMark = 0;
\r
3773 #endif /* configUSE_TRACE_FACILITY */
\r
3774 /*-----------------------------------------------------------*/
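/* Illustrative usage sketch, not part of the kernel: vTaskGetInfo() fills a
TaskStatus_t structure for a single task.  Passing eInvalid asks the kernel to
work out the task state itself, at the cost of a little extra processing. */
#if 0 /* Example only - excluded from the build. */
	void vQueryTaskExample( TaskHandle_t xTask )
	{
	TaskStatus_t xTaskDetails;

		/* pdTRUE requests the stack high water mark, eInvalid requests that
		the task state be determined rather than supplied by the caller. */
		vTaskGetInfo( xTask, &xTaskDetails, pdTRUE, eInvalid );

		/* xTaskDetails.pcTaskName, xTaskDetails.eCurrentState and
		xTaskDetails.usStackHighWaterMark are now valid. */
	}
#endif /* Example only. */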
\r
3776 #if ( configUSE_TRACE_FACILITY == 1 )
\r
3778 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
\r
3780 configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
\r
3781 UBaseType_t uxTask = 0;
\r
3783 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
\r
3785 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3787 /* Populate a TaskStatus_t structure within the
\r
3788 pxTaskStatusArray array for each task that is referenced from
\r
3789 pxList. See the definition of TaskStatus_t in task.h for the
\r
3790 meaning of each TaskStatus_t structure member. */
\r
3793 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
\r
3794 vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
\r
3796 } while( pxNextTCB != pxFirstTCB );
\r
3800 mtCOVERAGE_TEST_MARKER();
\r
3806 #endif /* configUSE_TRACE_FACILITY */
\r
3807 /*-----------------------------------------------------------*/
\r
3809 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
\r
3811 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
\r
3813 uint32_t ulCount = 0U;
\r
3815 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
\r
3817 pucStackByte -= portSTACK_GROWTH;
\r
3821 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
\r
3823 return ( configSTACK_DEPTH_TYPE ) ulCount;
\r
3826 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
\r
3827 /*-----------------------------------------------------------*/
\r
3829 #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
\r
3831 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
\r
3832 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
\r
3833 user to determine the return type. It gets around the problem of the value
\r
3834 overflowing on 8-bit types without breaking backward compatibility for
\r
3835 applications that expect an 8-bit return type. */
\r
3836 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
\r
3839 uint8_t *pucEndOfStack;
\r
3840 configSTACK_DEPTH_TYPE uxReturn;
\r
3842 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
\r
3843 the same except for their return type. Using configSTACK_DEPTH_TYPE
\r
3844 allows the user to determine the return type. It gets around the
\r
3845 problem of the value overflowing on 8-bit types without breaking
\r
3846 backward compatibility for applications that expect an 8-bit return type. */
\r
3849 pxTCB = prvGetTCBFromHandle( xTask );
\r
3851 #if portSTACK_GROWTH < 0
\r
3853 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
\r
3857 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
\r
3861 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
\r
3866 #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
\r
3867 /*-----------------------------------------------------------*/
\r
3869 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
\r
3871 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
\r
3874 uint8_t *pucEndOfStack;
\r
3875 UBaseType_t uxReturn;
\r
3877 pxTCB = prvGetTCBFromHandle( xTask );
\r
3879 #if portSTACK_GROWTH < 0
\r
3881 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
\r
3885 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
\r
3889 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
\r
3894 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
\r
3895 /*-----------------------------------------------------------*/
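/* Illustrative usage sketch, not part of the kernel: the high water mark is
the minimum amount of stack that has remained unused since the task started,
measured in words of StackType_t rather than bytes.  A value approaching zero
means the task has come close to overflowing its stack. */
#if 0 /* Example only - excluded from the build. */
	void vCheckOwnStackExample( void )
	{
	UBaseType_t uxHighWaterMark;

		/* Passing NULL queries the calling task. */
		uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );

		/* The threshold of 16 words is an arbitrary example. */
		configASSERT( uxHighWaterMark > ( UBaseType_t ) 16 );
	}
#endif /* Example only. */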
\r
3897 #if ( INCLUDE_vTaskDelete == 1 )
\r
3899 static void prvDeleteTCB( TCB_t *pxTCB )
\r
3901 /* This call is required specifically for the TriCore port. It must be
\r
3902 above the vPortFree() calls. The call is also used by ports/demos that
\r
3903 want to allocate and clean RAM statically. */
\r
3904 portCLEAN_UP_TCB( pxTCB );
\r
3906 /* Free up the memory allocated by the scheduler for the task. It is up
\r
3907 to the task to free any memory allocated at the application level.
\r
3908 See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
\r
3909 for additional information. */
\r
3910 #if ( configUSE_NEWLIB_REENTRANT == 1 )
\r
3912 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
\r
3914 #endif /* configUSE_NEWLIB_REENTRANT */
\r
3916 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
\r
3918 /* The task can only have been allocated dynamically - free both
\r
3919 the stack and TCB. */
\r
3920 vPortFree( pxTCB->pxStack );
\r
3921 vPortFree( pxTCB );
\r
3923 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
\r
3925 /* The task could have been allocated statically or dynamically, so
\r
3926 check what was statically allocated before trying to free the memory. */
\r
3928 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
\r
3930 /* Both the stack and TCB were allocated dynamically, so both must be freed. */
\r
3932 vPortFree( pxTCB->pxStack );
\r
3933 vPortFree( pxTCB );
\r
3935 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
\r
3937 /* Only the stack was statically allocated, so the TCB is the
\r
3938 only memory that must be freed. */
\r
3939 vPortFree( pxTCB );
\r
3943 /* Neither the stack nor the TCB were allocated dynamically, so
\r
3944 nothing needs to be freed. */
\r
3945 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
\r
3946 mtCOVERAGE_TEST_MARKER();
\r
3949 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
\r
3952 #endif /* INCLUDE_vTaskDelete */
\r
3953 /*-----------------------------------------------------------*/
\r
3955 static void prvResetNextTaskUnblockTime( void )
\r
3957 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
\r
3959 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
\r
3960 the maximum possible value so it is extremely unlikely that the
\r
3961 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
\r
3962 there is an item in the delayed list. */
\r
3963 xNextTaskUnblockTime = portMAX_DELAY;
\r
3967 /* The new current delayed list is not empty, get the value of
\r
3968 the item at the head of the delayed list. This is the time at
\r
3969 which the task at the head of the delayed list should be removed
\r
3970 from the Blocked state. */
\r
3971 xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
\r
3974 /*-----------------------------------------------------------*/
\r
3976 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
\r
3978 TaskHandle_t xTaskGetCurrentTaskHandle( void )
\r
3980 TaskHandle_t xReturn;
\r
3982 /* A critical section is not required as this is not called from
\r
3983 an interrupt and the current TCB will always be the same for any
\r
3984 individual execution thread. */
\r
3985 xReturn = pxCurrentTCB;
\r
3990 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
\r
3991 /*-----------------------------------------------------------*/
\r
3993 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
3995 BaseType_t xTaskGetSchedulerState( void )
\r
3997 BaseType_t xReturn;
\r
3999 if( xSchedulerRunning == pdFALSE )
\r
4001 xReturn = taskSCHEDULER_NOT_STARTED;
\r
4005 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
4007 xReturn = taskSCHEDULER_RUNNING;
\r
4011 xReturn = taskSCHEDULER_SUSPENDED;
\r
4018 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
\r
4019 /*-----------------------------------------------------------*/
\r
4021 #if ( configUSE_MUTEXES == 1 )
\r
4023 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
\r
4025 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
\r
4026 BaseType_t xReturn = pdFALSE;
\r
4028 /* If the mutex was given back by an interrupt while the queue was
\r
4029 locked then the mutex holder might now be NULL. _RB_ Is this still
\r
4030 needed as interrupts can no longer use mutexes? */
\r
4031 if( pxMutexHolder != NULL )
\r
4033 /* If the holder of the mutex has a priority below the priority of
\r
4034 the task attempting to obtain the mutex then it will temporarily
\r
4035 inherit the priority of the task attempting to obtain the mutex. */
\r
4036 if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
\r
4038 /* Adjust the mutex holder state to account for its new
\r
4039 priority. Only reset the event list item value if the value is
\r
4040 not being used for anything else. */
\r
4041 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
4043 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4047 mtCOVERAGE_TEST_MARKER();
\r
4050 /* If the task being modified is in the ready state it will need
\r
4051 to be moved into a new list. */
\r
4052 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
\r
4054 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
4056 /* It is known that the task is in its ready list so
\r
4057 there is no need to check again and the port level
\r
4058 reset macro can be called directly. */
\r
4059 portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
\r
4063 mtCOVERAGE_TEST_MARKER();
\r
4066 /* Inherit the priority before being moved into the new list. */
\r
4067 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
\r
4068 prvAddTaskToReadyList( pxMutexHolderTCB );
\r
4072 /* Just inherit the priority. */
\r
4073 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
\r
4076 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
\r
4078 /* Inheritance occurred. */
\r
4083 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
\r
4085 /* The base priority of the mutex holder is lower than the
\r
4086 priority of the task attempting to take the mutex, but the
\r
4087 current priority of the mutex holder is not lower than the
\r
4088 priority of the task attempting to take the mutex.
\r
4089 Therefore the mutex holder must have already inherited a
\r
4090 priority, but inheritance would have occurred if that had
\r
4091 not been the case. */
\r
4096 mtCOVERAGE_TEST_MARKER();
\r
4102 mtCOVERAGE_TEST_MARKER();
\r
4108 #endif /* configUSE_MUTEXES */
\r
4109 /*-----------------------------------------------------------*/
\r
4111 #if ( configUSE_MUTEXES == 1 )
\r
4113 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
\r
4115 TCB_t * const pxTCB = pxMutexHolder;
\r
4116 BaseType_t xReturn = pdFALSE;
\r
4118 if( pxMutexHolder != NULL )
\r
4120 /* A task can only have an inherited priority if it holds the mutex.
\r
4121 If the mutex is held by a task then it cannot be given from an
\r
4122 interrupt, and if a mutex is given by the holding task then it must
\r
4123 be the running state task. */
\r
4124 configASSERT( pxTCB == pxCurrentTCB );
\r
4125 configASSERT( pxTCB->uxMutexesHeld );
\r
4126 ( pxTCB->uxMutexesHeld )--;
\r
4128 /* Has the holder of the mutex inherited the priority of another task? */
\r
4130 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
\r
4132 /* Only disinherit if no other mutexes are held. */
\r
4133 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
\r
4135 /* A task can only have an inherited priority if it holds
\r
4136 the mutex. If the mutex is held by a task then it cannot be
\r
4137 given from an interrupt, and if a mutex is given by the
\r
4138 holding task then it must be the running state task. Remove
\r
4139 the holding task from the ready list. */
\r
4140 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
4142 portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
\r
4146 mtCOVERAGE_TEST_MARKER();
\r
4149 /* Disinherit the priority before adding the task into the
\r
4150 new ready list. */
\r
4151 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
\r
4152 pxTCB->uxPriority = pxTCB->uxBasePriority;
\r
4154 /* Reset the event list item value. It cannot be in use for
\r
4155 any other purpose if this task is running, and it must be
\r
4156 running to give back the mutex. */
\r
4157 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4158 prvAddTaskToReadyList( pxTCB );
\r
4160 /* Return true to indicate that a context switch is required.
\r
4161 This is only actually required in the corner case whereby
\r
4162 multiple mutexes were held and the mutexes were given back
\r
4163 in an order different to that in which they were taken.
\r
4164 If a context switch did not occur when the first mutex was
\r
4165 returned, even if a task was waiting on it, then a context
\r
4166 switch should occur when the last mutex is returned whether
\r
4167 a task is waiting on it or not. */
\r
4172 mtCOVERAGE_TEST_MARKER();
\r
4177 mtCOVERAGE_TEST_MARKER();
\r
4182 mtCOVERAGE_TEST_MARKER();
\r
4188 #endif /* configUSE_MUTEXES */
\r
4189 /*-----------------------------------------------------------*/
\r
4191 #if ( configUSE_MUTEXES == 1 )
\r
4193 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
\r
4195 TCB_t * const pxTCB = pxMutexHolder;
\r
4196 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
\r
4197 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
\r
4199 if( pxMutexHolder != NULL )
\r
4201 /* If pxMutexHolder is not NULL then the holder must hold at least one mutex. */
\r
4203 configASSERT( pxTCB->uxMutexesHeld );
\r
4205 /* Determine the priority to which the priority of the task that
\r
4206 holds the mutex should be set. This will be the greater of the
\r
4207 holding task's base priority and the priority of the highest
\r
4208 priority task that is waiting to obtain the mutex. */
\r
4209 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
\r
4211 uxPriorityToUse = uxHighestPriorityWaitingTask;
\r
4215 uxPriorityToUse = pxTCB->uxBasePriority;
\r
4218 /* Does the priority need to change? */
\r
4219 if( pxTCB->uxPriority != uxPriorityToUse )
\r
4221 /* Only disinherit if no other mutexes are held. This is a
\r
4222 simplification in the priority inheritance implementation. If
\r
4223 the task that holds the mutex is also holding other mutexes then
\r
4224 the other mutexes may have caused the priority inheritance. */
\r
4225 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
\r
4227 /* If a task has timed out because it already holds the
\r
4228 mutex it was trying to obtain then it cannot have inherited
\r
4229 its own priority. */
\r
4230 configASSERT( pxTCB != pxCurrentTCB );
\r
4232 /* Disinherit the priority, remembering the previous
\r
4233 priority to facilitate determining the subject task's state. */
\r
4235 traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
\r
4236 uxPriorityUsedOnEntry = pxTCB->uxPriority;
\r
4237 pxTCB->uxPriority = uxPriorityToUse;
\r
4239 /* Only reset the event list item value if the value is not
\r
4240 being used for anything else. */
\r
4241 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
\r
4243 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4247 mtCOVERAGE_TEST_MARKER();
\r
4250 /* If the running task is not the task that holds the mutex
\r
4251 then the task that holds the mutex could be in either the
\r
4252 Ready, Blocked or Suspended states. Only remove the task
\r
4253 from its current state list if it is in the Ready state as
\r
4254 the task's priority is going to change and there is one
\r
4255 Ready list per priority. */
\r
4256 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
\r
4258 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
\r
4260 /* It is known that the task is in its ready list so
\r
4261 there is no need to check again and the port level
\r
4262 reset macro can be called directly. */
\r
4263 portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
\r
4267 mtCOVERAGE_TEST_MARKER();
\r
4270 prvAddTaskToReadyList( pxTCB );
\r
4274 mtCOVERAGE_TEST_MARKER();
\r
4279 mtCOVERAGE_TEST_MARKER();
\r
4284 mtCOVERAGE_TEST_MARKER();
\r
4289 mtCOVERAGE_TEST_MARKER();
\r
4293 #endif /* configUSE_MUTEXES */
\r
4294 /*-----------------------------------------------------------*/
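/* Illustrative sketch, not part of the kernel: the inherit/disinherit
functions above are called from the queue implementation when a mutex created
with xSemaphoreCreateMutex() is taken and given.  From the application's point
of view the behaviour is simply as below (requires semphr.h). */
#if 0 /* Example only - excluded from the build. */
	SemaphoreHandle_t xMutex; /* Created elsewhere with xSemaphoreCreateMutex(). */

	void vLowPriorityTask( void *pvParameters )
	{
		for( ;; )
		{
			if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdTRUE )
			{
				/* If a higher priority task now blocks on xMutex, this task
				temporarily inherits that higher priority until it gives the
				mutex back, which bounds the priority inversion. */
				xSemaphoreGive( xMutex );
			}
		}
	}
#endif /* Example only. */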
\r
4296 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
4298 void vTaskEnterCritical( void )
\r
4300 portDISABLE_INTERRUPTS();
\r
4302 if( xSchedulerRunning != pdFALSE )
\r
4304 ( pxCurrentTCB->uxCriticalNesting )++;
\r
4306 /* This is not the interrupt safe version of the enter critical
\r
4307 function so assert() if it is being called from an interrupt
\r
4308 context. Only API functions that end in "FromISR" can be used in an
\r
4309 interrupt. Only assert if the critical nesting count is 1 to
\r
4310 protect against recursive calls if the assert function also uses a
\r
4311 critical section. */
\r
4312 if( pxCurrentTCB->uxCriticalNesting == 1 )
\r
4314 portASSERT_IF_IN_ISR();
\r
4319 mtCOVERAGE_TEST_MARKER();
\r
4323 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
4324 /*-----------------------------------------------------------*/
\r
4326 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
\r
4328 void vTaskExitCritical( void )
\r
4330 if( xSchedulerRunning != pdFALSE )
\r
4332 if( pxCurrentTCB->uxCriticalNesting > 0U )
\r
4334 ( pxCurrentTCB->uxCriticalNesting )--;
\r
4336 if( pxCurrentTCB->uxCriticalNesting == 0U )
\r
4338 portENABLE_INTERRUPTS();
\r
4342 mtCOVERAGE_TEST_MARKER();
\r
4347 mtCOVERAGE_TEST_MARKER();
\r
4352 mtCOVERAGE_TEST_MARKER();
\r
4356 #endif /* portCRITICAL_NESTING_IN_TCB */
\r
4357 /*-----------------------------------------------------------*/
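/* Illustrative usage sketch, not part of the kernel: application code does not
call vTaskEnterCritical()/vTaskExitCritical() directly - it uses the
taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros, which nest safely. */
#if 0 /* Example only - excluded from the build. */
	void vUpdateSharedCounterExample( volatile uint32_t *pulCounter )
	{
		taskENTER_CRITICAL();
		{
			/* Interrupts at or below the kernel's maximum API call priority
			are masked here, so the read-modify-write cannot be pre-empted. */
			( *pulCounter )++;
		}
		taskEXIT_CRITICAL();
	}
#endif /* Example only. */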
\r
4359 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
\r
4361 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
\r
4365 /* Start by copying the entire string. */
\r
4366 strcpy( pcBuffer, pcTaskName );
\r
4368 /* Pad the end of the string with spaces to ensure columns line up when printed out. */
\r
4370 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
\r
4372 pcBuffer[ x ] = ' ';
\r
4376 pcBuffer[ x ] = ( char ) 0x00;
\r
4378 /* Return the new end of string. */
\r
4379 return &( pcBuffer[ x ] );
\r
4382 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
\r
4383 /*-----------------------------------------------------------*/
\r
4385 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
4387 void vTaskList( char * pcWriteBuffer )
\r
4389 TaskStatus_t *pxTaskStatusArray;
\r
4390 UBaseType_t uxArraySize, x;
\r
4396 * This function is provided for convenience only, and is used by many
\r
4397 * of the demo applications. Do not consider it to be part of the scheduler.
\r
4400 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
\r
4401 * uxTaskGetSystemState() output into a human readable table that
\r
4402 * displays task names, states and stack usage.
\r
4404 * vTaskList() has a dependency on the sprintf() C library function that
\r
4405 * might bloat the code size, use a lot of stack, and provide different
\r
4406 * results on different platforms. An alternative, tiny, third party,
\r
4407 * and limited functionality implementation of sprintf() is provided in
\r
4408 * many of the FreeRTOS/Demo sub-directories in a file called
\r
4409 * printf-stdarg.c (note printf-stdarg.c does not provide a full
\r
4410 * snprintf() implementation!).
\r
4412 * It is recommended that production systems call uxTaskGetSystemState()
\r
4413 * directly to get access to raw stats data, rather than indirectly
\r
4414 * through a call to vTaskList().
\r
4418 /* Make sure the write buffer does not contain a string. */
\r
4419 *pcWriteBuffer = ( char ) 0x00;
\r
4421 /* Take a snapshot of the number of tasks in case it changes while this
\r
4422 function is executing. */
\r
4423 uxArraySize = uxCurrentNumberOfTasks;
\r
4425 /* Allocate an array index for each task. NOTE! If
\r
4426 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
\r
4427 equate to NULL. */
\r
4428 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
\r
4430 if( pxTaskStatusArray != NULL )
\r
4432 /* Generate the (binary) data. */
\r
4433 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
\r
4435 /* Create a human readable table from the binary data. */
\r
4436 for( x = 0; x < uxArraySize; x++ )
\r
4438 switch( pxTaskStatusArray[ x ].eCurrentState )
\r
4440 case eRunning: cStatus = tskRUNNING_CHAR;
\r
4443 case eReady: cStatus = tskREADY_CHAR;
\r
4446 case eBlocked: cStatus = tskBLOCKED_CHAR;
\r
4449 case eSuspended: cStatus = tskSUSPENDED_CHAR;
\r
4452 case eDeleted: cStatus = tskDELETED_CHAR;
\r
4455 case eInvalid: /* Fall through. */
\r
4456 default: /* Should not get here, but it is included
\r
4457 to prevent static checking errors. */
\r
4458 cStatus = ( char ) 0x00;
\r
4462 /* Write the task name to the string, padding with spaces so it
\r
4463 can be printed in tabular form more easily. */
\r
4464 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
\r
4466 /* Write the rest of the string. */
\r
4467 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
\r
4468 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
\r
4471 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
\r
4472 is 0 then vPortFree() will be #defined to nothing. */
\r
4473 vPortFree( pxTaskStatusArray );
\r
4477 mtCOVERAGE_TEST_MARKER();
\r
4481 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
4482 /*----------------------------------------------------------*/
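/* Illustrative usage sketch, not part of the kernel: vTaskList() writes one
line per task into a caller supplied buffer, so the buffer must be sized for
the whole table.  The 512 byte buffer below is an arbitrary example - the
space actually needed depends on the number of tasks and the task name length. */
#if 0 /* Example only - excluded from the build. */
	static char cTaskListBuffer[ 512 ];

	void vDumpTaskTableExample( void )
	{
		vTaskList( cTaskListBuffer );

		/* cTaskListBuffer now holds a column formatted table of task name,
		state, priority, stack high water mark and task number. */
	}
#endif /* Example only. */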
\r
4484 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
\r
4486 void vTaskGetRunTimeStats( char *pcWriteBuffer )
\r
4488 TaskStatus_t *pxTaskStatusArray;
\r
4489 UBaseType_t uxArraySize, x;
\r
4490 uint32_t ulTotalTime, ulStatsAsPercentage;
\r
4492 #if( configUSE_TRACE_FACILITY != 1 )
\r
4494 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
\r
4501 * This function is provided for convenience only, and is used by many
\r
4502 * of the demo applications. Do not consider it to be part of the scheduler.
\r
4505 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
\r
4506 * of the uxTaskGetSystemState() output into a human readable table that
\r
4507 * displays the amount of time each task has spent in the Running state
\r
4508 * in both absolute and percentage terms.
\r
4510 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
\r
4511 * function that might bloat the code size, use a lot of stack, and
\r
4512 * provide different results on different platforms. An alternative,
\r
4513 * tiny, third party, and limited functionality implementation of
\r
4514 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
\r
4515 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
\r
4516 * a full snprintf() implementation!).
\r
4518 * It is recommended that production systems call uxTaskGetSystemState()
\r
4519 * directly to get access to raw stats data, rather than indirectly
\r
4520 * through a call to vTaskGetRunTimeStats().
\r
4523 /* Make sure the write buffer does not contain a string. */
\r
4524 *pcWriteBuffer = ( char ) 0x00;
\r
4526 /* Take a snapshot of the number of tasks in case it changes while this
\r
4527 function is executing. */
\r
4528 uxArraySize = uxCurrentNumberOfTasks;
\r
4530 /* Allocate an array index for each task. NOTE! If
\r
4531 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
\r
4532 equate to NULL. */
\r
4533 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
\r
4535 if( pxTaskStatusArray != NULL )
\r
4537 /* Generate the (binary) data. */
\r
4538 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
\r
4540 /* For percentage calculations. */
\r
4541 ulTotalTime /= 100UL;
\r
4543 /* Avoid divide by zero errors. */
\r
4544 if( ulTotalTime > 0UL )
\r
4546 /* Create a human readable table from the binary data. */
\r
4547 for( x = 0; x < uxArraySize; x++ )
\r
4549 /* What percentage of the total run time has the task used?
\r
4550 This will always be rounded down to the nearest integer.
\r
4551 ulTotalTime has already been divided by 100. */
\r
4552 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
\r
4554 /* Write the task name to the string, padding with
\r
4555 spaces so it can be printed in tabular form more easily. */
\r
4557 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
\r
4559 if( ulStatsAsPercentage > 0UL )
\r
4561 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
\r
4563 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
\r
4567 /* sizeof( int ) == sizeof( long ) so a smaller
\r
4568 printf() library can be used. */
\r
4569 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
\r
4575 /* If the percentage is zero here then the task has
\r
4576 consumed less than 1% of the total run time. */
\r
4577 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
\r
4579 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
\r
4583 /* sizeof( int ) == sizeof( long ) so a smaller
\r
4584 printf() library can be used. */
\r
4585 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
\r
4590 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
\r
4595 mtCOVERAGE_TEST_MARKER();
\r
4598 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
\r
4599 is 0 then vPortFree() will be #defined to nothing. */
\r
4600 vPortFree( pxTaskStatusArray );
\r
4604 mtCOVERAGE_TEST_MARKER();
\r
4608 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
\r
4609 /*-----------------------------------------------------------*/
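/* Illustrative sketch, not part of the kernel: vTaskGetRunTimeStats() relies
on the application providing a run time counter, typically configured to run
an order of magnitude faster than the tick interrupt.  A FreeRTOSConfig.h
fragment might look like the one below - the two timer functions named are
hypothetical placeholders. */
#if 0 /* Example only - excluded from the build. */
	#define configGENERATE_RUN_TIME_STATS            1
	#define configUSE_STATS_FORMATTING_FUNCTIONS     1
	#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() vSetupRunTimeTimer()       /* Hypothetical. */
	#define portGET_RUN_TIME_COUNTER_VALUE()         ulGetRunTimeCounterValue() /* Hypothetical. */
#endif /* Example only. */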
\r
4611 TickType_t uxTaskResetEventItemValue( void )
\r
4613 TickType_t uxReturn;
\r
4615 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
\r
4617 /* Reset the event list item to its normal value - so it can be used with
\r
4618 queues and semaphores. */
\r
4619 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
4623 /*-----------------------------------------------------------*/
\r
4625 #if ( configUSE_MUTEXES == 1 )
\r
4627 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
\r
4629 /* If xSemaphoreCreateMutex() is called before any tasks have been created
\r
4630 then pxCurrentTCB will be NULL. */
\r
4631 if( pxCurrentTCB != NULL )
\r
4633 ( pxCurrentTCB->uxMutexesHeld )++;
\r
4636 return pxCurrentTCB;
\r
4639 #endif /* configUSE_MUTEXES */
\r
4640 /*-----------------------------------------------------------*/
\r
4642 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4644 uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait, BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
\r
4646 uint32_t ulReturn;
\r
4648 configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
4650 taskENTER_CRITICAL();
\r
4652 /* Only block if the notification count is not already non-zero. */
\r
4653 if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL )
\r
4655 /* Mark this task as waiting for a notification. */
\r
4656 pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
\r
4658 if( xTicksToWait > ( TickType_t ) 0 )
\r
4660 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
4661 traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait );
\r
4663 /* All ports are written to allow a yield in a critical
\r
4664 section (some will yield immediately, others wait until the
\r
4665 critical section exits) - but it is not something that
\r
4666 application code should ever do. */
\r
4667 portYIELD_WITHIN_API();
\r
4671 mtCOVERAGE_TEST_MARKER();
\r
4676 mtCOVERAGE_TEST_MARKER();
\r
4679 taskEXIT_CRITICAL();
\r
4681 taskENTER_CRITICAL();
\r
4683 traceTASK_NOTIFY_TAKE( uxIndexToWait );
\r
4684 ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
\r
4686 if( ulReturn != 0UL )
\r
4688 if( xClearCountOnExit != pdFALSE )
\r
4690 pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL;
\r
4694 pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
\r
4699 mtCOVERAGE_TEST_MARKER();
\r
4702 pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
\r
4704 taskEXIT_CRITICAL();
\r
4709 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4710 /*-----------------------------------------------------------*/
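/* Illustrative usage sketch, not part of the kernel: ulTaskNotifyTake() lets a
task use its notification value as a lightweight counting semaphore, most often
paired with vTaskNotifyGiveFromISR() called from an interrupt handler. */
#if 0 /* Example only - excluded from the build. */
	void vDeferredHandlingTask( void *pvParameters )
	{
		for( ;; )
		{
			/* Block until an ISR 'gives' the notification.  pdTRUE clears the
			count on exit, so multiple gives are processed as one. */
			if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
			{
				/* Perform the processing deferred from the interrupt here. */
			}
		}
	}
#endif /* Example only. */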
\r
4712 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4714 BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
\r
4715 uint32_t ulBitsToClearOnEntry,
\r
4716 uint32_t ulBitsToClearOnExit,
\r
4717 uint32_t *pulNotificationValue,
\r
4718 TickType_t xTicksToWait )
\r
4720 BaseType_t xReturn;
\r
4722 configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
4724 taskENTER_CRITICAL();
\r
4726 /* Only block if a notification is not already pending. */
\r
4727 if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
\r
4729 /* Clear bits in the task's notification value as bits may get
\r
4730 set by the notifying task or interrupt. This can be used to
\r
4731 clear the value to zero. */
\r
4732 pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;
\r
4734 /* Mark this task as waiting for a notification. */
\r
4735 pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
\r
4737 if( xTicksToWait > ( TickType_t ) 0 )
\r
4739 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
\r
4740 traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait );
\r
4742 /* All ports are written to allow a yield in a critical
\r
4743 section (some will yield immediately, others wait until the
\r
4744 critical section exits) - but it is not something that
\r
4745 application code should ever do. */
\r
4746 portYIELD_WITHIN_API();
\r
4750 mtCOVERAGE_TEST_MARKER();
\r
4755 mtCOVERAGE_TEST_MARKER();
\r
4758 taskEXIT_CRITICAL();
\r
4760 taskENTER_CRITICAL();
\r
4762 traceTASK_NOTIFY_WAIT( uxIndexToWait );
\r
4764 if( pulNotificationValue != NULL )
\r
4766 /* Output the current notification value, which may or may not have changed. */
\r
4768 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
\r
4771 /* If a notification was received then either the task never entered the
\r
4772 blocked state (because a notification was already pending) or the
\r
4773 task unblocked because of a notification. Otherwise the task
\r
4774 unblocked because of a timeout. */
\r
4775 if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
\r
4777 /* A notification was not received. */
\r
4778 xReturn = pdFALSE;
\r
4782 /* A notification was already pending or a notification was
\r
4783 received while the task was waiting. */
\r
4784 pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
\r
4788 pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
\r
4790 taskEXIT_CRITICAL();
\r
4795 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4796 /*-----------------------------------------------------------*/
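/* Illustrative usage sketch, not part of the kernel: xTaskNotifyWait() can
treat the 32-bit notification value as a set of event flags.  The bit masks
below are arbitrary examples. */
#if 0 /* Example only - excluded from the build. */
	#define exEVENT_RX_BIT		( 1UL << 0 ) /* Hypothetical event flag. */
	#define exEVENT_TX_BIT		( 1UL << 1 ) /* Hypothetical event flag. */

	void vEventProcessingTask( void *pvParameters )
	{
	uint32_t ulNotifiedValue;

		for( ;; )
		{
			/* Clear nothing on entry, clear all bits on exit, and wait
			indefinitely for a notification. */
			if( xTaskNotifyWait( 0UL, 0xFFFFFFFFUL, &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
			{
				if( ( ulNotifiedValue & exEVENT_RX_BIT ) != 0UL )
				{
					/* Handle reception. */
				}

				if( ( ulNotifiedValue & exEVENT_TX_BIT ) != 0UL )
				{
					/* Handle transmit complete. */
				}
			}
		}
	}
#endif /* Example only. */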
\r
4798 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4800 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
\r
4801 UBaseType_t uxIndexToNotify,
\r
4803 eNotifyAction eAction,
\r
4804 uint32_t *pulPreviousNotificationValue )
\r
4807 BaseType_t xReturn = pdPASS;
\r
4808 uint8_t ucOriginalNotifyState;
\r
4810 configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
4811 configASSERT( xTaskToNotify );
\r
4812 pxTCB = xTaskToNotify;
\r
4814 taskENTER_CRITICAL();
\r
4816 if( pulPreviousNotificationValue != NULL )
\r
4818 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
\r
4821 ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
\r
4823 pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
\r
4828 pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
\r
4832 ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
\r
4835 case eSetValueWithOverwrite :
\r
4836 pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
\r
4839 case eSetValueWithoutOverwrite :
\r
4840 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
\r
4842 pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
\r
4846 /* The value could not be written to the task. */
\r
4852 /* The task is being notified without its notify value being updated. */
\r
4857 /* Should not get here if all enums are handled.
\r
4858 Artificially force an assert by testing a value the
\r
4859 compiler can't assume is const. */
\r
4860 configASSERT( pxTCB->ulNotifiedValue[ uxIndexToNotify ] == ~0UL );
\r
4865 traceTASK_NOTIFY( uxIndexToNotify );
\r
4867 /* If the task is in the blocked state specifically to wait for a
\r
4868 notification then unblock it now. */
\r
4869 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
4871 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
4872 prvAddTaskToReadyList( pxTCB );
\r
4874 /* The task should not have been on an event list. */
\r
4875 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
4877 #if( configUSE_TICKLESS_IDLE != 0 )
\r
4879 /* If a task is blocked waiting for a notification then
\r
4880 xNextTaskUnblockTime might be set to the blocked task's time
\r
4881 out time. If the task is unblocked for a reason other than
\r
4882 a timeout xNextTaskUnblockTime is normally left unchanged,
\r
4883 because it will automatically get reset to a new value when
\r
4884 the tick count equals xNextTaskUnblockTime. However if
\r
4885 tickless idling is used it might be more important to enter
\r
4886 sleep mode at the earliest possible time - so reset
\r
4887 xNextTaskUnblockTime here to ensure it is updated at the
\r
4888 earliest possible time. */
\r
4889 prvResetNextTaskUnblockTime();
\r
4893 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
4895 /* The notified task has a priority above the currently
\r
4896 executing task so a yield is required. */
\r
4897 taskYIELD_IF_USING_PREEMPTION();
\r
4901 mtCOVERAGE_TEST_MARKER();
\r
4906 mtCOVERAGE_TEST_MARKER();
\r
4909 taskEXIT_CRITICAL();
\r
4914 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
4915 /*-----------------------------------------------------------*/
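/* Illustrative usage sketch, not part of the kernel: xTaskNotify() is the task
level call used to set bits in, overwrite, or increment another task's
notification value.  The event bit is a hypothetical flag matching the
xTaskNotifyWait() sketch above. */
#if 0 /* Example only - excluded from the build. */
	#define exEVENT_RX_BIT		( 1UL << 0 ) /* Hypothetical event flag. */

	void vSignalReceptionExample( TaskHandle_t xHandlingTask )
	{
		/* OR exEVENT_RX_BIT into the handling task's notification value and
		unblock the task if it is waiting for a notification. */
		( void ) xTaskNotify( xHandlingTask, exEVENT_RX_BIT, eSetBits );
	}
#endif /* Example only. */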
\r
4917 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
4919 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
\r
4920 UBaseType_t uxIndexToNotify,
\r
4922 eNotifyAction eAction,
\r
4923 uint32_t *pulPreviousNotificationValue,
\r
4924 BaseType_t *pxHigherPriorityTaskWoken )
\r
4927 uint8_t ucOriginalNotifyState;
\r
4928 BaseType_t xReturn = pdPASS;
\r
4929 UBaseType_t uxSavedInterruptStatus;
\r
4931 configASSERT( xTaskToNotify );
\r
4932 configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
4934 /* RTOS ports that support interrupt nesting have the concept of a
\r
4935 maximum system call (or maximum API call) interrupt priority.
\r
4936 Interrupts that are above the maximum system call priority are kept
\r
4937 permanently enabled, even when the RTOS kernel is in a critical section,
\r
4938 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
4939 is defined in FreeRTOSConfig.h then
\r
4940 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
4941 failure if a FreeRTOS API function is called from an interrupt that has
\r
4942 been assigned a priority above the configured maximum system call
\r
4943 priority. Only FreeRTOS functions that end in FromISR can be called
\r
4944 from interrupts that have been assigned a priority at or (logically)
\r
4945 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
4946 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
4947 simple as possible. More information (albeit Cortex-M specific) is
\r
4948 provided on the following link:
\r
4949 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
4950 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
4952 pxTCB = xTaskToNotify;
\r
4954 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
4956 if( pulPreviousNotificationValue != NULL )
\r
4958 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
\r
4961 ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
\r
4962 pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
\r
4967 pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
\r
4971 ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
\r
4974 case eSetValueWithOverwrite :
\r
4975 pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
\r
4978 case eSetValueWithoutOverwrite :
\r
4979 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
\r
4981 pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
\r
4985 /* The value could not be written to the task. */
\r
4991 /* The task is being notified without its notify value being updated. */
\r
4996 /* Should not get here if all enums are handled.
\r
4997 Artificially force an assert by testing a value the
\r
4998 compiler can't assume is const. */
\r
4999 configASSERT( pxTCB->ulNotifiedValue[ uxIndexToNotify ] == ~0UL );
\r
5003 traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );
\r
5005 /* If the task is in the blocked state specifically to wait for a
\r
5006 notification then unblock it now. */
\r
5007 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
5009 /* The task should not have been on an event list. */
\r
5010 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
5012 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
5014 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
5015 prvAddTaskToReadyList( pxTCB );
\r
5019 /* The delayed and ready lists cannot be accessed, so hold
\r
5020 this task pending until the scheduler is resumed. */
\r
5021 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
5024 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
5026 /* The notified task has a priority above the currently
\r
5027 executing task so a yield is required. */
\r
5028 if( pxHigherPriorityTaskWoken != NULL )
\r
5030 *pxHigherPriorityTaskWoken = pdTRUE;
\r
5033 /* Mark that a yield is pending in case the user is not
\r
5034 using the "xHigherPriorityTaskWoken" parameter to an ISR
\r
5035 safe FreeRTOS function. */
\r
5036 xYieldPending = pdTRUE;
\r
5040 mtCOVERAGE_TEST_MARKER();
\r
5044 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
5049 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
5050 /*-----------------------------------------------------------*/
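/* Illustrative usage sketch, not part of the kernel: the FromISR variant adds
the usual pxHigherPriorityTaskWoken mechanism so the interrupt can request a
context switch on exit.  The handler name and task handle are hypothetical. */
#if 0 /* Example only - excluded from the build. */
	#define exEVENT_RX_BIT		( 1UL << 0 ) /* Hypothetical event flag. */

	extern TaskHandle_t xHandlingTaskHandle; /* Hypothetical. */

	void vExampleInterruptHandler( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		( void ) xTaskNotifyFromISR( xHandlingTaskHandle, exEVENT_RX_BIT, eSetBits, &xHigherPriorityTaskWoken );

		/* Request a context switch if the notified task has a higher priority
		than the task this interrupt pre-empted. */
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
#endif /* Example only. */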
\r
5052 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
5054 void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, BaseType_t *pxHigherPriorityTaskWoken )
\r
5057 uint8_t ucOriginalNotifyState;
\r
5058 UBaseType_t uxSavedInterruptStatus;
\r
5060 configASSERT( xTaskToNotify );
\r
5061 configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
5063 /* RTOS ports that support interrupt nesting have the concept of a
\r
5064 maximum system call (or maximum API call) interrupt priority.
\r
5065 Interrupts that are above the maximum system call priority are kept
\r
5066 permanently enabled, even when the RTOS kernel is in a critical section,
\r
5067 but cannot make any calls to FreeRTOS API functions. If configASSERT()
\r
5068 is defined in FreeRTOSConfig.h then
\r
5069 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
5070 failure if a FreeRTOS API function is called from an interrupt that has
\r
5071 been assigned a priority above the configured maximum system call
\r
5072 priority. Only FreeRTOS functions that end in FromISR can be called
\r
5073 from interrupts that have been assigned a priority at or (logically)
\r
5074 below the maximum system call interrupt priority. FreeRTOS maintains a
\r
5075 separate interrupt safe API to ensure interrupt entry is as fast and as
\r
5076 simple as possible. More information (albeit Cortex-M specific) is
\r
5077 provided on the following link:
\r
5078 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
5079 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
5081 pxTCB = xTaskToNotify;
\r
5083 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
5085 ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
\r
5086 pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
\r
5088 /* 'Giving' is equivalent to incrementing a count in a counting semaphore. */
\r
5090 ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
\r
5092 traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );
\r
5094 /* If the task is in the blocked state specifically to wait for a
\r
5095 notification then unblock it now. */
\r
5096 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
\r
5098 /* The task should not have been on an event list. */
\r
5099 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
\r
5101 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
\r
5103 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
\r
5104 prvAddTaskToReadyList( pxTCB );
\r
5108 /* The delayed and ready lists cannot be accessed, so hold
\r
5109 this task pending until the scheduler is resumed. */
\r
5110 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
\r
5113 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
\r
5115 /* The notified task has a priority above the currently
\r
5116 executing task so a yield is required. */
\r
5117 if( pxHigherPriorityTaskWoken != NULL )
\r
5119 *pxHigherPriorityTaskWoken = pdTRUE;
\r
5122 /* Mark that a yield is pending in case the user is not
\r
5123 using the "xHigherPriorityTaskWoken" parameter in an ISR
\r
5124 safe FreeRTOS function. */
\r
5125 xYieldPending = pdTRUE;
\r
5129 mtCOVERAGE_TEST_MARKER();
\r
5133 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
5136 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
5137 /*-----------------------------------------------------------*/
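/* Illustrative sketch, not part of the kernel: the ISR side of the
ulTaskNotifyTake() pattern shown earlier.  The handler name and task handle
are hypothetical. */
#if 0 /* Example only - excluded from the build. */
	extern TaskHandle_t xDeferredHandlingTaskHandle; /* Hypothetical. */

	void vRxCompleteISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		/* Increment the task's notification value and unblock it. */
		vTaskNotifyGiveFromISR( xDeferredHandlingTaskHandle, &xHigherPriorityTaskWoken );
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
#endif /* Example only. */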
\r
5139 #if( configUSE_TASK_NOTIFICATIONS == 1 )
\r
5141 BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask, UBaseType_t uxIndexToClear )
\r
5144 BaseType_t xReturn;
\r
5146 configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
\r
5148 /* If null is passed in here then it is the calling task that is having
\r
5149 its notification state cleared. */
\r
5150 pxTCB = prvGetTCBFromHandle( xTask );
\r
5152 taskENTER_CRITICAL();
\r
5154 if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
\r
5156 pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
\r
5164 taskEXIT_CRITICAL();
\r
5169 #endif /* configUSE_TASK_NOTIFICATIONS */
\r
5170 /*-----------------------------------------------------------*/
\r
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear )
	{
	TCB_t *pxTCB;
	uint32_t ulReturn;

		/* If null is passed in here then it is the calling task that is having
		its notification value cleared. */
		pxTCB = prvGetTCBFromHandle( xTask );

		taskENTER_CRITICAL();
		{
			/* Return the notification as it was before the bits were cleared,
			then clear the bit mask. */
			ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
			pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
		}
		taskEXIT_CRITICAL();

		return ulReturn;
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/
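
#if( configUSE_TASK_NOTIFICATIONS == 1 )

	/* Illustrative sketch only - not part of the kernel.  It shows the public
	ulTaskNotifyValueClear() wrapper (which resolves to the function above)
	being used to read the calling task's notification value and clear a group
	of event bits in a single critical section.  The bit definitions and the
	function name are assumptions made for the example. */
	#define exampleEVENT_RX_BIT		( 1UL << 0 )
	#define exampleEVENT_TX_BIT		( 1UL << 1 )

	void vExampleDrainPendingEvents( void )
	{
	uint32_t ulPendingEvents;

		/* NULL means the calling task.  The value returned is the notification
		value as it was before the requested bits were cleared. */
		ulPendingEvents = ulTaskNotifyValueClear( NULL, exampleEVENT_RX_BIT | exampleEVENT_TX_BIT );

		if( ( ulPendingEvents & exampleEVENT_RX_BIT ) != 0 )
		{
			/* A receive event was pending - handle it here. */
		}

		if( ( ulPendingEvents & exampleEVENT_TX_BIT ) != 0 )
		{
			/* A transmit event was pending - handle it here. */
		}
	}

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/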
#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

	uint32_t ulTaskGetIdleRunTimeCounter( void )
	{
		return xIdleTaskHandle->ulRunTimeCounter;
	}

#endif
/*-----------------------------------------------------------*/
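
#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

	/* Illustrative sketch only - not part of the kernel.  It shows one way of
	using ulTaskGetIdleRunTimeCounter(): sampling the counter periodically and
	treating the delta between samples as a rough indication of how much of
	the last interval was spent in the idle task.  The monitoring task and its
	one second period are assumptions made for the example, and
	INCLUDE_vTaskDelay is assumed to be 1. */
	void vExampleIdleMonitorTask( void *pvParameters )
	{
	uint32_t ulLastIdleCount = 0, ulCurrentIdleCount, ulIdleDelta;

		( void ) pvParameters;

		for( ;; )
		{
			vTaskDelay( pdMS_TO_TICKS( 1000 ) );

			/* The counter is expressed in the units of the port's run time
			statistics clock and will eventually wrap; the unsigned
			subtraction below remains correct across a single wrap. */
			ulCurrentIdleCount = ulTaskGetIdleRunTimeCounter();
			ulIdleDelta = ulCurrentIdleCount - ulLastIdleCount;
			ulLastIdleCount = ulCurrentIdleCount;

			/* A smaller ulIdleDelta than in previous intervals means less
			idle time, and therefore a higher CPU load. */
			( void ) ulIdleDelta;
		}
	}

#endif /* configGENERATE_RUN_TIME_STATS && INCLUDE_xTaskGetIdleTaskHandle */
/*-----------------------------------------------------------*/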
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
{
TickType_t xTimeToWake;
const TickType_t xConstTickCount = xTickCount;

	#if( INCLUDE_xTaskAbortDelay == 1 )
	{
		/* About to enter a delayed list, so ensure the ucDelayAborted flag is
		reset to pdFALSE so it can be detected as having been set to pdTRUE
		when the task leaves the Blocked state. */
		pxCurrentTCB->ucDelayAborted = pdFALSE;
	}
	#endif

	/* Remove the task from the ready list before adding it to the blocked list
	as the same list item is used for both lists. */
	if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
	{
		/* The current task must be in a ready list, so there is no need to
		check, and the port reset macro can be called directly. */
		portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	#if ( INCLUDE_vTaskSuspend == 1 )
	{
		if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
		{
			/* Add the task to the suspended task list instead of a delayed task
			list to ensure it is not woken by a timing event.  It will block
			indefinitely. */
			vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
		}
		else
		{
			/* Calculate the time at which the task should be woken if the event
			does not occur.  This may overflow but this doesn't matter, the
			kernel will manage it correctly. */
			xTimeToWake = xConstTickCount + xTicksToWait;

			/* The list item will be inserted in wake time order. */
			listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

			if( xTimeToWake < xConstTickCount )
			{
				/* Wake time has overflowed.  Place this item in the overflow
				list. */
				vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
			}
			else
			{
				/* The wake time has not overflowed, so the current block list
				is used. */
				vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

				/* If the task entering the blocked state was placed at the
				head of the list of blocked tasks then xNextTaskUnblockTime
				needs to be updated too. */
				if( xTimeToWake < xNextTaskUnblockTime )
				{
					xNextTaskUnblockTime = xTimeToWake;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
	}
	#else /* INCLUDE_vTaskSuspend */
	{
		/* Calculate the time at which the task should be woken if the event
		does not occur.  This may overflow but this doesn't matter, the kernel
		will manage it correctly. */
		xTimeToWake = xConstTickCount + xTicksToWait;

		/* The list item will be inserted in wake time order. */
		listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

		if( xTimeToWake < xConstTickCount )
		{
			/* Wake time has overflowed.  Place this item in the overflow list. */
			vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
		}
		else
		{
			/* The wake time has not overflowed, so the current block list is used. */
			vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

			/* If the task entering the blocked state was placed at the head of the
			list of blocked tasks then xNextTaskUnblockTime needs to be updated
			too. */
			if( xTimeToWake < xNextTaskUnblockTime )
			{
				xNextTaskUnblockTime = xTimeToWake;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		/* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
		( void ) xCanBlockIndefinitely;
	}
	#endif /* INCLUDE_vTaskSuspend */
}
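
/* Illustrative sketch only - not part of the kernel.  It works through, with
concrete numbers, why the 'xTimeToWake < xConstTickCount' test above detects a
wrapped wake time.  A 32-bit TickType_t (configUSE_16_BIT_TICKS set to 0) is
assumed, and the function name is an assumption made for the example. */
void vExampleShowWakeTimeWrap( void )
{
const TickType_t xConstTickCount = ( TickType_t ) 0xFFFFFFF0UL;	/* Tick count close to wrapping. */
const TickType_t xTicksToWait = ( TickType_t ) 0x20UL;			/* A 32 tick block time. */
TickType_t xTimeToWake;

	/* Unsigned arithmetic wraps: 0xFFFFFFF0 + 0x20 = 0x10. */
	xTimeToWake = xConstTickCount + xTicksToWait;

	if( xTimeToWake < xConstTickCount )
	{
		/* 0x10 is less than 0xFFFFFFF0, so the wake time has wrapped and the
		task would be placed on pxOverflowDelayedTaskList.  The kernel swaps
		the two delayed lists when the tick count itself wraps, so the task is
		still woken at the correct time. */
	}
}
/*-----------------------------------------------------------*/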

/* Code below here allows additional code to be inserted into this source file,
especially where access to file scope functions and data is needed (for example
when performing module tests). */

#ifdef FREERTOS_MODULE_TEST
	#include "tasks_test_access_functions.h"
#endif


#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )

	#include "freertos_tasks_c_additions.h"

	#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
		static void freertos_tasks_c_additions_init( void )
		{
			FREERTOS_TASKS_C_ADDITIONS_INIT();