/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers. That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area. When the
 * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
 * not necessary, and the pcHead pointer is set to NULL to indicate that the
 * structure instead holds a pointer to the mutex holder (if any). Map an
 * alternative name to the pcHead structure member to ensure the readability of
 * the code is maintained. The QueuePointers_t and SemaphoreData_t types are used
 * to form a union as their usage is mutually exclusive dependent on what the
 * queue is being used for. */
#define uxQueueType               pcHead
#define queueQUEUE_IS_MUTEX       NULL
typedef struct QueuePointers
{
    int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area. One more byte is allocated than necessary to store the queue items; this byte is used as a marker. */
    int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #if ( configNUMBER_OF_CORES == 1 )
        #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* #if ( configUSE_PREEMPTION == 0 ) */
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference. See the following link for the
 * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    int8_t * pcHead;    /**< Points to the beginning of the queue storage area. */
    int8_t * pcWriteTo; /**< Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
    } u;

    List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
    List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
    UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /**< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition * pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif
} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
 * name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
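
/* Illustrative note (not part of the kernel source): because items are queued
 * by copy, not reference, a sender's buffer can be reused as soon as the send
 * call returns. A hypothetical application task holding a queue of uint32_t
 * items in xAppQueue (an assumed handle created elsewhere) might do:
 *
 *     uint32_t ulValue = 10U;
 *     ( void ) xQueueSend( xAppQueue, &ulValue, 0U );  (The queue stores a copy.)
 *     ulValue = 20U;  (Safe - the copy already inside the queue is unaffected.)
 */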
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures. It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

/* The type stored within the queue registry array. This allows a name
 * to be assigned to each queue making kernel aware debugging a little
 * more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
 * new QueueRegistryItem_t name below to enable the use of older kernel aware
 * debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */

/* MISRA Ref 8.4.2 [Declaration shall be visible] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
/* coverity[misra_c_2012_rule_8_4_violation] */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
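
/* Illustrative usage sketch (not part of the kernel source): an application
 * can make a queue visible by name to a kernel aware debugger through the
 * public registry API, provided configQUEUE_REGISTRY_SIZE is large enough.
 * xAppQueue is an assumed application handle:
 *
 *     QueueHandle_t xAppQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *     vQueueAddToRegistry( xAppQueue, "AppQueue" );
 *     ...
 *     vQueueUnregisterQueue( xAppQueue );  (Before deleting the queue.)
 */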
/*
 * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists. If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking. When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )

/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue. When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if ( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if ( configUSE_MUTEXES == 1 )

/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex. This function returns
 * that priority.
 */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked. Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

/*
 * Macro to increment cTxLock member of the queue data structure. It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )

/*
 * Macro to increment cRxLock member of the queue data structure. It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )
/*-----------------------------------------------------------*/
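
/* Illustrative walk-through (not part of the kernel source) of the lock
 * counts manipulated by the macros above: while a task holds the queue
 * locked, an ISR that posts via xQueueSendFromISR() must not touch the event
 * lists, so it bumps cTxLock from queueLOCKED_UNMODIFIED ( 0 ) to 1, 2, ...
 * instead. When the task later calls prvUnlockQueue() it unblocks one waiting
 * reader per count before setting the member back to queueUNLOCKED ( -1 ).
 * cRxLock plays the same role for items removed by ISRs. */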
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                               BaseType_t xNewQueue )
{
    BaseType_t xReturn = pdPASS;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericReset( xQueue, xNewQueue );

    configASSERT( pxQueue );

    if( ( pxQueue != NULL ) &&
        ( pxQueue->uxLength >= 1U ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
    {
        taskENTER_CRITICAL();
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxQueue->pcWriteTo = pxQueue->pcHead;
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;

            if( xNewQueue == pdFALSE )
            {
                /* If there are tasks blocked waiting to read from the queue, then
                 * the tasks will remain blocked as after this function exits the queue
                 * will still be empty. If there are tasks blocked waiting to write to
                 * the queue, then one should be unblocked as after this function exits
                 * it will be possible to write to it. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Ensure the event queues start in the correct state. */
                vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
                vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
            }
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        xReturn = pdFAIL;
    }

    configASSERT( xReturn != pdFAIL );

    /* A value is returned for calling semantic consistency with previous
     * versions. */
    traceRETURN_xQueueGenericReset( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
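
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally reach the function above through the xQueueReset() macro defined
 * in queue.h. xAppQueue is an assumed application handle:
 *
 *     ( void ) xQueueReset( xAppQueue );  (Empties the queue; returns pdPASS
 *                                          whenever the handle is valid.)
 */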
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
                                             const UBaseType_t uxItemSize,
                                             uint8_t * pucQueueStorage,
                                             StaticQueue_t * pxStaticQueue,
                                             const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;

        traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );

        /* The StaticQueue_t structure and the queue storage area must be
         * provided. */
        configASSERT( pxStaticQueue );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            ( pxStaticQueue != NULL ) &&

            /* A queue storage area should be provided if the item size is not 0, and
             * should not be provided if the item size is 0. */
            ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
            ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
        {
            #if ( configASSERT_DEFINED == 1 )
            {
                /* Sanity check that the size of the structure used to declare a
                 * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
                 * the real queue and semaphore structures. */
                volatile size_t xSize = sizeof( StaticQueue_t );

                /* This assertion cannot be branch covered in unit tests. */
                configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
                ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
            }
            #endif /* configASSERT_DEFINED */

            /* The address of a statically allocated queue was passed in, use it.
             * The address of a statically allocated storage area was also passed in
             * but is already set. */
            /* MISRA Ref 11.3.1 [Misaligned access] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
            /* coverity[misra_c_2012_rule_11_3_violation] */
            pxNewQueue = ( Queue_t * ) pxStaticQueue;

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                 * note this queue was allocated statically in case the queue is
                 * later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreateStatic( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
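
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally reach the function above through the xQueueCreateStatic() macro in
 * queue.h, supplying both buffers themselves:
 *
 *     #define QUEUE_LENGTH    8
 *     #define ITEM_SIZE       sizeof( uint32_t )
 *     static uint8_t ucStorage[ QUEUE_LENGTH * ITEM_SIZE ];
 *     static StaticQueue_t xQueueBuffer;
 *     QueueHandle_t xAppQueue = xQueueCreateStatic( QUEUE_LENGTH, ITEM_SIZE,
 *                                                   ucStorage, &xQueueBuffer );
 */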
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
                                              uint8_t ** ppucQueueStorage,
                                              StaticQueue_t ** ppxStaticQueue )
    {
        BaseType_t xReturn;
        Queue_t * const pxQueue = xQueue;

        traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );

        configASSERT( pxQueue );
        configASSERT( ppxStaticQueue );

        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        {
            /* Check if the queue was statically allocated. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
            {
                if( ppucQueueStorage != NULL )
                {
                    *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
                }

                /* MISRA Ref 11.3.1 [Misaligned access] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
                /* coverity[misra_c_2012_rule_11_3_violation] */
                *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* configSUPPORT_DYNAMIC_ALLOCATION */
        {
            /* Queue must have been statically allocated. */
            if( ppucQueueStorage != NULL )
            {
                *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
            }

            *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
            xReturn = pdTRUE;
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        traceRETURN_xQueueGenericGetStaticBuffers( xReturn );

        return xReturn;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
                                       const UBaseType_t uxItemSize,
                                       const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;
        size_t xQueueSizeInBytes;
        uint8_t * pucQueueStorage;

        traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            /* Check for multiplication overflow. */
            ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
            /* Check for addition overflow. */
            ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
        {
            /* Allocate enough space to hold the maximum number of items that
             * can be in the queue at any time. It is valid for uxItemSize to be
             * zero in the case the queue is used as a semaphore. */
            xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );

            /* MISRA Ref 11.5.1 [Malloc memory assignment] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
            /* coverity[misra_c_2012_rule_11_5_violation] */
            pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

            if( pxNewQueue != NULL )
            {
                /* Jump past the queue structure to find the location of the queue
                 * storage area. */
                pucQueueStorage = ( uint8_t * ) pxNewQueue;
                pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

                #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                {
                    /* Queues can be created either statically or dynamically, so
                     * note this queue was created dynamically in case it is later
                     * deleted. */
                    pxNewQueue->ucStaticallyAllocated = pdFALSE;
                }
                #endif /* configSUPPORT_STATIC_ALLOCATION */

                prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
            }
            else
            {
                traceQUEUE_CREATE_FAILED( ucQueueType );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreate( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
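
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally reach the function above through the xQueueCreate() macro in
 * queue.h:
 *
 *     QueueHandle_t xAppQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *     if( xAppQueue == NULL )
 *     {
 *         (Handle heap exhaustion.)
 *     }
 */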
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
     * configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
         * be set to NULL because NULL is used as a key to say the queue is used as
         * a mutex. Therefore just set pcHead to point to the queue as a benign
         * value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
     * defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if ( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t * pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
             * correctly for a generic queue, but this function is creating a
             * mutex. Overwrite those members that need to be set differently -
             * in particular the information required for priority inheritance. */
            pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutex( ucQueueType );

        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutex( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
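
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally create and use mutexes through the semaphore API in semphr.h:
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *     if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdTRUE )
 *     {
 *         (Access the protected resource.)
 *         ( void ) xSemaphoreGive( xMutex );
 *     }
 */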
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
                                           StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );

        /* Prevent compiler warnings about unused parameters if
         * configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutexStatic( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;
        Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

        traceENTER_xQueueGetMutexHolder( xSemaphore );

        configASSERT( xSemaphore );

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
         * be called directly. Note: This is a good way of determining if the
         * calling task is the mutex holder, but not a good way of determining the
         * identity of the mutex holder, as the holder may change between the
         * following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xQueueGetMutexHolder( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;

        traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
         * holder should not change in an ISR, and therefore a critical section is
         * not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueGiveMutexRecursive( xMutex );

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then xMutexHolder will not
         * change outside of this task. If this task does not hold the mutex then
         * pxMutexHolder can never coincidentally equal the task's handle, and as
         * this is the only condition we are interested in it does not matter if
         * pxMutexHolder is accessed simultaneously by another task. Therefore no
         * mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
             * the task handle, therefore no underflow check is required. Also,
             * uxRecursiveCallCount is only modified by the mutex holder, and as
             * there can only be one, no mutual exclusion is required to modify the
             * uxRecursiveCallCount member. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex. This will automatically unblock any other
                 * task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
             * holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        traceRETURN_xQueueGiveMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                         TickType_t xTicksToWait )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
         * xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
             * obtained. The calling task may have entered the Blocked state
             * before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        traceRETURN_xQueueTakeMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
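
/* Illustrative usage sketch (not part of the kernel source): a recursive
 * mutex can be taken repeatedly by its holder, provided each take is balanced
 * by a give (see semphr.h):
 *
 *     SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *     ( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );
 *     ( void ) xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY );  (Nested.)
 *     ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *     ( void ) xSemaphoreGiveRecursive( xRecMutex );  (Now actually released.)
 */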
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
                                                       const UBaseType_t uxInitialCount,
                                                       StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                                 const UBaseType_t uxInitialCount )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphore( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
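
/* Illustrative usage sketch (not part of the kernel source): a counting
 * semaphore created with uxMaxCount 5 and uxInitialCount 0 can latch up to
 * five 'give' events (see semphr.h):
 *
 *     SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 0 );
 *     ( void ) xSemaphoreGive( xCounting );     (Count 0 -> 1.)
 *     ( void ) xSemaphoreTake( xCounting, 0 );  (Count 1 -> 0.)
 */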
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now? The running task must be the
             * highest priority task wanting to access the queue. If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately. Yes it is ok to
                                 * do this from within the critical section - the
                                 * kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes
                             * and the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                     * queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                             * our own so yield immediately. Yes it is ok to do
                             * this from within the critical section - the kernel
                             * takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                         * executed if the task was holding multiple mutexes and
                         * the mutexes were given back in an order that is
                         * different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                traceRETURN_xQueueGenericSend( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    traceRETURN_xQueueGenericSend( errQUEUE_FULL );

                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list. It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            traceRETURN_xQueueGenericSend( errQUEUE_FULL );

            return errQUEUE_FULL;
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
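
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally reach the function above through the xQueueSend(),
 * xQueueSendToBack(), xQueueSendToFront() and xQueueOverwrite() macros in
 * queue.h. xAppQueue is an assumed application handle:
 *
 *     uint32_t ulValue = 42U;
 *     if( xQueueSend( xAppQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *     {
 *         (The queue stayed full for the whole 100 ms - errQUEUE_FULL.)
 *     }
 */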
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue. Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex. That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked. This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock. A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    ( void ) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGenericSendFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
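
/* Illustrative usage sketch (not part of the kernel source): from an ISR the
 * application uses the xQueueSendFromISR() macro and requests a context
 * switch if a higher priority task was woken. vAppISR and xAppQueue are
 * assumed application names; portYIELD_FROM_ISR() is port specific:
 *
 *     void vAppISR( void )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         uint32_t ulValue = 42U;
 *
 *         ( void ) xQueueSendFromISR( xAppQueue, &ulValue, &xHigherPriorityTaskWoken );
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */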
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0. Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupt, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority. FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR. As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed. Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );

            /* The event list is not altered if the queue is locked. This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock. A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGiveFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
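
/* Illustrative usage sketch (not part of the kernel source): the function
 * above sits behind the xSemaphoreGiveFromISR() macro in semphr.h. vAppISR
 * and xBinarySemaphore are assumed application names:
 *
 *     void vAppISR( void )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *         ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */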
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself. This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now? To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue? If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                traceRETURN_xQueueReceive( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired. If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );

                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again. Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out. If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
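
/* Illustrative usage sketch (not part of the kernel source): a hypothetical
 * consumer task blocks on the queue and processes each received copy.
 * xAppQueue is an assumed application handle:
 *
 *     for( ; ; )
 *     {
 *         uint32_t ulReceived;
 *
 *         if( xQueueReceive( xAppQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
 *         {
 *             (Process ulReceived.)
 *         }
 *     }
 */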
1653 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1654 TickType_t xTicksToWait )
1656 BaseType_t xEntryTimeSet = pdFALSE;
1658 Queue_t * const pxQueue = xQueue;
1660 #if ( configUSE_MUTEXES == 1 )
1661 BaseType_t xInheritanceOccurred = pdFALSE;
1664 traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1666 /* Check the queue pointer is not NULL. */
1667 configASSERT( ( pxQueue ) );
1669 /* Check this really is a semaphore, in which case the item size will be
1671 configASSERT( pxQueue->uxItemSize == 0 );
1673 /* Cannot block if the scheduler is suspended. */
1674 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1676 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1680 /*lint -save -e904 This function relaxes the coding standard somewhat to allow return
1681 * statements within the function itself. This is done in the interest
1682 * of execution time efficiency. */
1685 taskENTER_CRITICAL();
1687 /* Semaphores are queues with an item size of 0, and where the
1688 * number of messages in the queue is the semaphore's count value. */
1689 const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1691 /* Is there data in the queue now? To be running the calling task
1692 * must be the highest priority task wanting to access the queue. */
1693 if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1695 traceQUEUE_RECEIVE( pxQueue );
1697 /* Semaphores are queues with a data size of zero and where the
1698 * messages waiting is the semaphore's count. Reduce the count. */
1699 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1701 #if ( configUSE_MUTEXES == 1 )
1703 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1705 /* Record the information required to implement
1706 * priority inheritance should it become necessary. */
1707 pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1711 mtCOVERAGE_TEST_MARKER();
1714 #endif /* configUSE_MUTEXES */
1716 /* Check to see if other tasks are blocked waiting to give the
1717 * semaphore, and if so, unblock the highest priority such task. */
1718 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1720 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1722 queueYIELD_IF_USING_PREEMPTION();
1726 mtCOVERAGE_TEST_MARKER();
1731 mtCOVERAGE_TEST_MARKER();
1734 taskEXIT_CRITICAL();
1736 traceRETURN_xQueueSemaphoreTake( pdPASS );
1742 if( xTicksToWait == ( TickType_t ) 0 )
1744 /* The semaphore count was 0 and no block time is specified
1745 * (or the block time has expired) so exit now. */
1746 taskEXIT_CRITICAL();
1748 traceQUEUE_RECEIVE_FAILED( pxQueue );
1749 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1751 return errQUEUE_EMPTY;
1753 else if( xEntryTimeSet == pdFALSE )
1755 /* The semaphore count was 0 and a block time was specified
1756 * so configure the timeout structure ready to block. */
1757 vTaskInternalSetTimeOutState( &xTimeOut );
1758 xEntryTimeSet = pdTRUE;
1762 /* Entry time was already set. */
1763 mtCOVERAGE_TEST_MARKER();
1767 taskEXIT_CRITICAL();
1769 /* Interrupts and other tasks can give to and take from the semaphore
1770 * now the critical section has been exited. */
1773 prvLockQueue( pxQueue );
1775 /* Update the timeout state to see if it has expired yet. */
1776 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1778 /* A block time is specified and not expired. If the semaphore
1779 * count is 0 then enter the Blocked state to wait for a semaphore to
1780 * become available. As semaphores are implemented with queues the
1781 * queue being empty is equivalent to the semaphore count being 0. */
1782 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1784 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1786 #if ( configUSE_MUTEXES == 1 )
1788 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1790 taskENTER_CRITICAL();
1792 xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1794 taskEXIT_CRITICAL();
1798 mtCOVERAGE_TEST_MARKER();
1801 #endif /* if ( configUSE_MUTEXES == 1 ) */
1803 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1804 prvUnlockQueue( pxQueue );
1806 if( xTaskResumeAll() == pdFALSE )
1808 taskYIELD_WITHIN_API();
1812 mtCOVERAGE_TEST_MARKER();
1817 /* There was no timeout and the semaphore count was not 0, so
1818 * attempt to take the semaphore again. */
1819 prvUnlockQueue( pxQueue );
1820 ( void ) xTaskResumeAll();
1826 prvUnlockQueue( pxQueue );
1827 ( void ) xTaskResumeAll();
1829 /* If the semaphore count is 0 exit now as the timeout has
1830 * expired. Otherwise return to attempt to take the semaphore that is
1831 * known to be available. As semaphores are implemented by queues the
1832 * queue being empty is equivalent to the semaphore count being 0. */
1833 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1835 #if ( configUSE_MUTEXES == 1 )
1837 /* xInheritanceOccurred could only have be set if
1838 * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1839 * test the mutex type again to check it is actually a mutex. */
1840 if( xInheritanceOccurred != pdFALSE )
1842 taskENTER_CRITICAL();
1844 UBaseType_t uxHighestWaitingPriority;
1846 /* This task blocking on the mutex caused another
1847 * task to inherit this task's priority. Now this task
1848 * has timed out the priority should be disinherited
1849 * again, but only as low as the next highest priority
1850 * task that is waiting for the same mutex. */
1851 uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1853 /* vTaskPriorityDisinheritAfterTimeout uses the uxHighestWaitingPriority
1854 * parameter to index pxReadyTasksLists when adding the task holding
1855 * mutex to the ready list for its new priority. Coverity thinks that
1856 * it can result in out-of-bounds access which is not true because
1857 * uxHighestWaitingPriority, as returned by prvGetDisinheritPriorityAfterTimeout,
1858 * is capped at ( configMAX_PRIORITIES - 1 ). */
1859 /* coverity[overrun] */
1860 vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1862 taskEXIT_CRITICAL();
1865 #endif /* configUSE_MUTEXES */
1867 traceQUEUE_RECEIVE_FAILED( pxQueue );
1868 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1870 return errQUEUE_EMPTY;
1874 mtCOVERAGE_TEST_MARKER();
1877 } /*lint -restore */
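/* Illustrative application-side sketch, not part of the kernel source: the
 * mutex take path above is normally reached through the xSemaphoreTake()
 * macro from semphr.h. The names xExampleMutex and vExampleLockedWork are
 * hypothetical; the mutex is assumed to have been created with
 * xSemaphoreCreateMutex().
 *
 * extern SemaphoreHandle_t xExampleMutex;
 *
 * void vExampleTask( void * pvParameters )
 * {
 *     ( void ) pvParameters;
 *
 *     for( ; ; )
 *     {
 *         // Block for up to 100ms. If the holder has a lower priority than
 *         // this task it temporarily inherits this task's priority.
 *         if( xSemaphoreTake( xExampleMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
 *         {
 *             vExampleLockedWork();
 *             ( void ) xSemaphoreGive( xExampleMutex );
 *         }
 *     }
 * }
 */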
1879 /*-----------------------------------------------------------*/
1881 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1882 void * const pvBuffer,
1883 TickType_t xTicksToWait )
1885 BaseType_t xEntryTimeSet = pdFALSE;
1887 int8_t * pcOriginalReadPosition;
1888 Queue_t * const pxQueue = xQueue;
1890 traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1892 /* Check the pointer is not NULL. */
1893 configASSERT( ( pxQueue ) );
1895 /* The buffer into which data is received can only be NULL if the data size
1896 * is zero (so no data is copied into the buffer). */
1897 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1899 /* Cannot block if the scheduler is suspended. */
1900 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1902 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1906 /*lint -save -e904 This function relaxes the coding standard somewhat to
1907 * allow return statements within the function itself. This is done in the
1908 * interest of execution time efficiency. */
1911 taskENTER_CRITICAL();
1913 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1915 /* Is there data in the queue now? To be running the calling task
1916 * must be the highest priority task wanting to access the queue. */
1917 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1919 /* Remember the read position so it can be reset after the data
1920 * is read from the queue as this function is only peeking the
1921 * data, not removing it. */
1922 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1924 prvCopyDataFromQueue( pxQueue, pvBuffer );
1925 traceQUEUE_PEEK( pxQueue );
1927 /* The data is not being removed, so reset the read pointer. */
1928 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1930 /* The data is being left in the queue, so see if there are
1931 * any other tasks waiting for the data. */
1932 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1934 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1936 /* The task waiting has a higher priority than this task. */
1937 queueYIELD_IF_USING_PREEMPTION();
1941 mtCOVERAGE_TEST_MARKER();
1946 mtCOVERAGE_TEST_MARKER();
1949 taskEXIT_CRITICAL();
1951 traceRETURN_xQueuePeek( pdPASS );
1957 if( xTicksToWait == ( TickType_t ) 0 )
1959 /* The queue was empty and no block time is specified (or
1960 * the block time has expired) so leave now. */
1961 taskEXIT_CRITICAL();
1963 traceQUEUE_PEEK_FAILED( pxQueue );
1964 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1966 return errQUEUE_EMPTY;
1968 else if( xEntryTimeSet == pdFALSE )
1970 /* The queue was empty and a block time was specified so
1971 * configure the timeout structure ready to enter the blocked state. */
1973 vTaskInternalSetTimeOutState( &xTimeOut );
1974 xEntryTimeSet = pdTRUE;
1978 /* Entry time was already set. */
1979 mtCOVERAGE_TEST_MARKER();
1983 taskEXIT_CRITICAL();
1985 /* Interrupts and other tasks can send to and receive from the queue
1986 * now that the critical section has been exited. */
1989 prvLockQueue( pxQueue );
1991 /* Update the timeout state to see if it has expired yet. */
1992 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1994 /* Timeout has not expired yet, check to see if there is data in the
1995 * queue now, and if not enter the Blocked state to wait for data. */
1996 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1998 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1999 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
2000 prvUnlockQueue( pxQueue );
2002 if( xTaskResumeAll() == pdFALSE )
2004 taskYIELD_WITHIN_API();
2008 mtCOVERAGE_TEST_MARKER();
2013 /* There is data in the queue now, so don't enter the blocked
2014 * state, instead return to try and obtain the data. */
2015 prvUnlockQueue( pxQueue );
2016 ( void ) xTaskResumeAll();
2021 /* The timeout has expired. If there is still no data in the queue
2022 * exit, otherwise go back and try to read the data again. */
2023 prvUnlockQueue( pxQueue );
2024 ( void ) xTaskResumeAll();
2026 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2028 traceQUEUE_PEEK_FAILED( pxQueue );
2029 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2031 return errQUEUE_EMPTY;
2035 mtCOVERAGE_TEST_MARKER();
2038 } /*lint -restore */
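/* Illustrative sketch, not part of the kernel source: xQueuePeek() copies
 * the item at the head of the queue without removing it, so a following
 * receive returns the same item. Assumes a queue of uint32_t items with
 * this task as the only consumer; all names are hypothetical.
 *
 * void vExamplePeekThenReceive( QueueHandle_t xQueue )
 * {
 *     uint32_t ulPeeked, ulReceived;
 *
 *     if( xQueuePeek( xQueue, &ulPeeked, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *     {
 *         // The item is still in the queue, so this receive cannot fail
 *         // and removes the very item that was just peeked.
 *         ( void ) xQueueReceive( xQueue, &ulReceived, 0 );
 *         configASSERT( ulPeeked == ulReceived );
 *     }
 * }
 */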
2040 /*-----------------------------------------------------------*/
2042 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2043 void * const pvBuffer,
2044 BaseType_t * const pxHigherPriorityTaskWoken )
2047 UBaseType_t uxSavedInterruptStatus;
2048 Queue_t * const pxQueue = xQueue;
2050 traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2052 configASSERT( pxQueue );
2053 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2055 /* RTOS ports that support interrupt nesting have the concept of a maximum
2056 * system call (or maximum API call) interrupt priority. Interrupts that are
2057 * above the maximum system call priority are kept permanently enabled, even
2058 * when the RTOS kernel is in a critical section, but cannot make any calls to
2059 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2060 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2061 * failure if a FreeRTOS API function is called from an interrupt that has been
2062 * assigned a priority above the configured maximum system call priority.
2063 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2064 * that have been assigned a priority at or (logically) below the maximum
2065 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2066 * safe API to ensure interrupt entry is as fast and as simple as possible.
2067 * More information (albeit Cortex-M specific) is provided on the following
2068 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2069 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2071 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2073 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2075 /* Cannot block in an ISR, so check there is data available. */
2076 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2078 const int8_t cRxLock = pxQueue->cRxLock;
2080 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2082 prvCopyDataFromQueue( pxQueue, pvBuffer );
2083 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2085 /* If the queue is locked the event list will not be modified.
2086 * Instead update the lock count so the task that unlocks the queue
2087 * will know that an ISR has removed data while the queue was locked. */
2089 if( cRxLock == queueUNLOCKED )
2091 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2093 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2095 /* The task waiting has a higher priority than us so
2096 * force a context switch. */
2097 if( pxHigherPriorityTaskWoken != NULL )
2099 *pxHigherPriorityTaskWoken = pdTRUE;
2103 mtCOVERAGE_TEST_MARKER();
2108 mtCOVERAGE_TEST_MARKER();
2113 mtCOVERAGE_TEST_MARKER();
2118 /* Increment the lock count so the task that unlocks the queue
2119 * knows that data was removed while it was locked. */
2120 prvIncrementQueueRxLock( pxQueue, cRxLock );
2128 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2131 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2133 traceRETURN_xQueueReceiveFromISR( xReturn );
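/* Illustrative ISR-side sketch, not part of the kernel source: draining one
 * item from an interrupt and requesting a context switch if a higher
 * priority task was unblocked. xExampleQueue and vExampleISR are
 * hypothetical; the yield macro name is port specific.
 *
 * extern QueueHandle_t xExampleQueue;
 *
 * void vExampleISR( void )
 * {
 *     uint32_t ulValue;
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     if( xQueueReceiveFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken ) == pdPASS )
 *     {
 *         // Use ulValue here.
 *     }
 *
 *     // Most ports name this portYIELD_FROM_ISR() or portEND_SWITCHING_ISR().
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 */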
2137 /*-----------------------------------------------------------*/
2139 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2140 void * const pvBuffer )
2143 UBaseType_t uxSavedInterruptStatus;
2144 int8_t * pcOriginalReadPosition;
2145 Queue_t * const pxQueue = xQueue;
2147 traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2149 configASSERT( pxQueue );
2150 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2151 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2153 /* RTOS ports that support interrupt nesting have the concept of a maximum
2154 * system call (or maximum API call) interrupt priority. Interrupts that are
2155 * above the maximum system call priority are kept permanently enabled, even
2156 * when the RTOS kernel is in a critical section, but cannot make any calls to
2157 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2158 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2159 * failure if a FreeRTOS API function is called from an interrupt that has been
2160 * assigned a priority above the configured maximum system call priority.
2161 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2162 * that have been assigned a priority at or (logically) below the maximum
2163 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2164 * safe API to ensure interrupt entry is as fast and as simple as possible.
2165 * More information (albeit Cortex-M specific) is provided on the following
2166 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2167 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2169 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2171 /* Cannot block in an ISR, so check there is data available. */
2172 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2174 traceQUEUE_PEEK_FROM_ISR( pxQueue );
2176 /* Remember the read position so it can be reset as nothing is
2177 * actually being removed from the queue. */
2178 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2179 prvCopyDataFromQueue( pxQueue, pvBuffer );
2180 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2187 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2190 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2192 traceRETURN_xQueuePeekFromISR( xReturn );
2196 /*-----------------------------------------------------------*/
2198 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2200 UBaseType_t uxReturn;
2202 traceENTER_uxQueueMessagesWaiting( xQueue );
2204 configASSERT( xQueue );
2206 taskENTER_CRITICAL();
2208 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2210 taskEXIT_CRITICAL();
2212 traceRETURN_uxQueueMessagesWaiting( uxReturn );
2215 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2216 /*-----------------------------------------------------------*/
2218 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2220 UBaseType_t uxReturn;
2221 Queue_t * const pxQueue = xQueue;
2223 traceENTER_uxQueueSpacesAvailable( xQueue );
2225 configASSERT( pxQueue );
2227 taskENTER_CRITICAL();
2229 uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2231 taskEXIT_CRITICAL();
2233 traceRETURN_uxQueueSpacesAvailable( uxReturn );
2236 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
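/* Illustrative sketch, not part of the kernel source: for a queue the used
 * and free counts always sum to the length the queue was created with; for
 * a semaphore the "messages waiting" value is the semaphore's current
 * count. The function below is hypothetical.
 *
 * void vExampleQueryQueue( QueueHandle_t xQueue )
 * {
 *     UBaseType_t uxUsed = uxQueueMessagesWaiting( xQueue );
 *     UBaseType_t uxFree = uxQueueSpacesAvailable( xQueue );
 *
 *     // uxUsed + uxFree equals uxQueueGetQueueLength( xQueue ), although
 *     // both snapshots can be stale as soon as they are read if other
 *     // tasks or interrupts are using the queue.
 *     ( void ) uxUsed;
 *     ( void ) uxFree;
 * }
 */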
2237 /*-----------------------------------------------------------*/
2239 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2241 UBaseType_t uxReturn;
2242 Queue_t * const pxQueue = xQueue;
2244 traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2246 configASSERT( pxQueue );
2247 uxReturn = pxQueue->uxMessagesWaiting;
2249 traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2252 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2253 /*-----------------------------------------------------------*/
2255 void vQueueDelete( QueueHandle_t xQueue )
2257 Queue_t * const pxQueue = xQueue;
2259 traceENTER_vQueueDelete( xQueue );
2261 configASSERT( pxQueue );
2262 traceQUEUE_DELETE( pxQueue );
2264 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2266 vQueueUnregisterQueue( pxQueue );
2270 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2272 /* The queue can only have been allocated dynamically - free it again. */
2274 vPortFree( pxQueue );
2276 #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2278 /* The queue could have been allocated statically or dynamically, so
2279 * check before attempting to free the memory. */
2280 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2282 vPortFree( pxQueue );
2286 mtCOVERAGE_TEST_MARKER();
2289 #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2291 /* The queue must have been statically allocated, so is not going to be
2292 * deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
2295 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2297 traceRETURN_vQueueDelete();
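/* Illustrative sketch, not part of the kernel source: a queue must not be
 * deleted while tasks are blocked on it, as those tasks are not notified
 * of the deletion. All names are hypothetical.
 *
 * void vExampleCreateUseDelete( void )
 * {
 *     QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 *     if( xQueue != NULL )
 *     {
 *         uint32_t ulValue = 42U;
 *
 *         ( void ) xQueueSend( xQueue, &ulValue, 0 );
 *         // ... use the queue ...
 *         vQueueDelete( xQueue ); // Also unregisters it and, if it was
 *                                 // dynamically allocated, frees its storage.
 *     }
 * }
 */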
2299 /*-----------------------------------------------------------*/
2301 #if ( configUSE_TRACE_FACILITY == 1 )
2303 UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2305 traceENTER_uxQueueGetQueueNumber( xQueue );
2307 traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2309 return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2312 #endif /* configUSE_TRACE_FACILITY */
2313 /*-----------------------------------------------------------*/
2315 #if ( configUSE_TRACE_FACILITY == 1 )
2317 void vQueueSetQueueNumber( QueueHandle_t xQueue,
2318 UBaseType_t uxQueueNumber )
2320 traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2322 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2324 traceRETURN_vQueueSetQueueNumber();
2327 #endif /* configUSE_TRACE_FACILITY */
2328 /*-----------------------------------------------------------*/
2330 #if ( configUSE_TRACE_FACILITY == 1 )
2332 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2334 traceENTER_ucQueueGetQueueType( xQueue );
2336 traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2338 return ( ( Queue_t * ) xQueue )->ucQueueType;
2341 #endif /* configUSE_TRACE_FACILITY */
2342 /*-----------------------------------------------------------*/
2344 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2346 traceENTER_uxQueueGetQueueItemSize( xQueue );
2348 traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2350 return ( ( Queue_t * ) xQueue )->uxItemSize;
2352 /*-----------------------------------------------------------*/
2354 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2356 traceENTER_uxQueueGetQueueLength( xQueue );
2358 traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2360 return ( ( Queue_t * ) xQueue )->uxLength;
2362 /*-----------------------------------------------------------*/
2364 #if ( configUSE_MUTEXES == 1 )
2366 static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2368 UBaseType_t uxHighestPriorityOfWaitingTasks;
2370 /* If a task waiting for a mutex causes the mutex holder to inherit a
2371 * priority, but the waiting task times out, then the holder should
2372 * disinherit the priority - but only down to the highest priority of any
2373 * other tasks that are waiting for the same mutex. For this purpose,
2374 * return the priority of the highest priority task that is waiting for the mutex. */
2376 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2378 uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2382 uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2385 return uxHighestPriorityOfWaitingTasks;
2388 #endif /* configUSE_MUTEXES */
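/* Worked example for the calculation above, with illustrative numbers:
 * event list items store ( configMAX_PRIORITIES - task priority ), so the
 * list head always belongs to the highest priority waiting task. With
 * configMAX_PRIORITIES = 5 and a highest waiting priority of 3, the head
 * item value is 5 - 3 = 2, and the function returns 5 - 2 = 3: the
 * priority the timed-out holder should disinherit down to. With no
 * waiters it returns tskIDLE_PRIORITY ( 0 ). */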
2389 /*-----------------------------------------------------------*/
2391 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2392 const void * pvItemToQueue,
2393 const BaseType_t xPosition )
2395 BaseType_t xReturn = pdFALSE;
2396 UBaseType_t uxMessagesWaiting;
2398 /* This function is called from a critical section. */
2400 uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2402 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2404 #if ( configUSE_MUTEXES == 1 )
2406 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2408 /* The mutex is no longer being held. */
2409 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2410 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2414 mtCOVERAGE_TEST_MARKER();
2417 #endif /* configUSE_MUTEXES */
2419 else if( xPosition == queueSEND_TO_BACK )
2421 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2422 pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2424 if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2426 pxQueue->pcWriteTo = pxQueue->pcHead;
2430 mtCOVERAGE_TEST_MARKER();
2435 ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
2436 pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2438 if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2440 pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2444 mtCOVERAGE_TEST_MARKER();
2447 if( xPosition == queueOVERWRITE )
2449 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2451 /* An item is not being added but overwritten, so subtract
2452 * one from the recorded number of items in the queue so when
2453 * one is added again below the number of recorded items remains correct. */
2455 --uxMessagesWaiting;
2459 mtCOVERAGE_TEST_MARKER();
2464 mtCOVERAGE_TEST_MARKER();
2468 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
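/* Illustrative sketch, not part of the kernel source: the queueOVERWRITE
 * position handled above is normally reached through xQueueOverwrite(),
 * which targets queues of length one and replaces the stored item instead
 * of appending. The function name is hypothetical.
 *
 * void vExampleMailbox( void )
 * {
 *     QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
 *     uint32_t ulValue = 1U;
 *
 *     if( xMailbox != NULL )
 *     {
 *         ( void ) xQueueOverwrite( xMailbox, &ulValue ); // Holds 1.
 *         ulValue = 2U;
 *         ( void ) xQueueOverwrite( xMailbox, &ulValue ); // Still one item, now 2.
 *     }
 * }
 */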
2472 /*-----------------------------------------------------------*/
2474 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2475 void * const pvBuffer )
2477 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2479 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2481 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
2483 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2487 mtCOVERAGE_TEST_MARKER();
2490 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2493 /*-----------------------------------------------------------*/
2495 static void prvUnlockQueue( Queue_t * const pxQueue )
2497 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2499 /* The lock counts contain the number of extra data items placed or
2500 * removed from the queue while the queue was locked. When a queue is
2501 * locked, items can be added or removed, but the event lists cannot be updated. */
2503 taskENTER_CRITICAL();
2505 int8_t cTxLock = pxQueue->cTxLock;
2507 /* See if data was added to the queue while it was locked. */
2508 while( cTxLock > queueLOCKED_UNMODIFIED )
2510 /* Data was posted while the queue was locked. Are any tasks
2511 * blocked waiting for data to become available? */
2512 #if ( configUSE_QUEUE_SETS == 1 )
2514 if( pxQueue->pxQueueSetContainer != NULL )
2516 if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2518 /* The queue is a member of a queue set, and posting to
2519 * the queue set caused a higher priority task to unblock.
2520 * A context switch is required. */
2525 mtCOVERAGE_TEST_MARKER();
2530 /* Tasks that are removed from the event list will get
2531 * added to the pending ready list as the scheduler is still suspended. */
2533 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2535 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2537 /* The task waiting has a higher priority so record that a
2538 * context switch is required. */
2543 mtCOVERAGE_TEST_MARKER();
2552 #else /* configUSE_QUEUE_SETS */
2554 /* Tasks that are removed from the event list will get added to
2555 * the pending ready list as the scheduler is still suspended. */
2556 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2558 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2560 /* The task waiting has a higher priority so record that
2561 * a context switch is required. */
2566 mtCOVERAGE_TEST_MARKER();
2574 #endif /* configUSE_QUEUE_SETS */
2579 pxQueue->cTxLock = queueUNLOCKED;
2581 taskEXIT_CRITICAL();
2583 /* Do the same for the Rx lock. */
2584 taskENTER_CRITICAL();
2586 int8_t cRxLock = pxQueue->cRxLock;
2588 while( cRxLock > queueLOCKED_UNMODIFIED )
2590 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2592 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2598 mtCOVERAGE_TEST_MARKER();
2609 pxQueue->cRxLock = queueUNLOCKED;
2611 taskEXIT_CRITICAL();
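/* Worked example of the lock counts drained above, with illustrative
 * numbers: prvLockQueue() moves cTxLock and cRxLock from queueUNLOCKED
 * ( -1 ) to queueLOCKED_UNMODIFIED ( 0 ). If two ISRs then post to the
 * locked queue, cTxLock is incremented to 2 instead of the event lists
 * being touched. The unlock loop above then wakes at most two waiting
 * receivers, one per counted post, before returning both locks to
 * queueUNLOCKED. */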
2613 /*-----------------------------------------------------------*/
2615 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2619 taskENTER_CRITICAL();
2621 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2630 taskEXIT_CRITICAL();
2634 /*-----------------------------------------------------------*/
2636 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2639 Queue_t * const pxQueue = xQueue;
2641 traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2643 configASSERT( pxQueue );
2645 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2654 traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2657 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2658 /*-----------------------------------------------------------*/
2660 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2664 taskENTER_CRITICAL();
2666 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2675 taskEXIT_CRITICAL();
2679 /*-----------------------------------------------------------*/
2681 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2684 Queue_t * const pxQueue = xQueue;
2686 traceENTER_xQueueIsQueueFullFromISR( xQueue );
2688 configASSERT( pxQueue );
2690 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2699 traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2702 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
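/* Illustrative ISR-side sketch, not part of the kernel source: polling the
 * queue state before sending from an interrupt, for example to count
 * dropped samples. xExampleQueue, ulExampleReadSample() and
 * ulExampleDroppedSamples are hypothetical.
 *
 * extern QueueHandle_t xExampleQueue;
 * extern volatile uint32_t ulExampleDroppedSamples;
 *
 * void vExampleTxISR( void )
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *     uint32_t ulSample = ulExampleReadSample();
 *
 *     if( xQueueIsQueueFullFromISR( xExampleQueue ) == pdFALSE )
 *     {
 *         ( void ) xQueueSendFromISR( xExampleQueue, &ulSample, &xHigherPriorityTaskWoken );
 *     }
 *     else
 *     {
 *         ulExampleDroppedSamples++;
 *     }
 *
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 */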
2703 /*-----------------------------------------------------------*/
2705 #if ( configUSE_CO_ROUTINES == 1 )
2707 BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2708 const void * pvItemToQueue,
2709 TickType_t xTicksToWait )
2712 Queue_t * const pxQueue = xQueue;
2714 traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2716 /* If the queue is already full we may have to block. A critical section
2717 * is required to prevent an interrupt removing something from the queue
2718 * between the check to see if the queue is full and blocking on the queue. */
2719 portDISABLE_INTERRUPTS();
2721 if( prvIsQueueFull( pxQueue ) != pdFALSE )
2723 /* The queue is full - do we want to block or just leave without posting? */
2725 if( xTicksToWait > ( TickType_t ) 0 )
2727 /* As this is called from a co-routine we cannot block directly, but
2728 * return indicating that we need to block. */
2729 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2730 portENABLE_INTERRUPTS();
2731 return errQUEUE_BLOCKED;
2735 portENABLE_INTERRUPTS();
2736 return errQUEUE_FULL;
2740 portENABLE_INTERRUPTS();
2742 portDISABLE_INTERRUPTS();
2744 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2746 /* There is room in the queue, copy the data into the queue. */
2747 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2750 /* Were any co-routines waiting for data to become available? */
2751 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2753 /* In this instance the co-routine could be placed directly
2754 * into the ready list as we are within a critical section.
2755 * Instead the same pending ready list mechanism is used as if
2756 * the event were caused from within an interrupt. */
2757 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2759 /* The co-routine waiting has a higher priority so record
2760 * that a yield might be appropriate. */
2761 xReturn = errQUEUE_YIELD;
2765 mtCOVERAGE_TEST_MARKER();
2770 mtCOVERAGE_TEST_MARKER();
2775 xReturn = errQUEUE_FULL;
2778 portENABLE_INTERRUPTS();
2780 traceRETURN_xQueueCRSend( xReturn );
2785 #endif /* configUSE_CO_ROUTINES */
2786 /*-----------------------------------------------------------*/
2788 #if ( configUSE_CO_ROUTINES == 1 )
2790 BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2792 TickType_t xTicksToWait )
2795 Queue_t * const pxQueue = xQueue;
2797 traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2799 /* If the queue is already empty we may have to block. A critical section
2800 * is required to prevent an interrupt adding something to the queue
2801 * between the check to see if the queue is empty and blocking on the queue. */
2802 portDISABLE_INTERRUPTS();
2804 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2806 /* There are no messages in the queue, do we want to block or just
2807 * leave with nothing? */
2808 if( xTicksToWait > ( TickType_t ) 0 )
2810 /* As this is a co-routine we cannot block directly, but return
2811 * indicating that we need to block. */
2812 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2813 portENABLE_INTERRUPTS();
2814 return errQUEUE_BLOCKED;
2818 portENABLE_INTERRUPTS();
2819 return errQUEUE_FULL;
2824 mtCOVERAGE_TEST_MARKER();
2827 portENABLE_INTERRUPTS();
2829 portDISABLE_INTERRUPTS();
2831 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2833 /* Data is available from the queue. */
2834 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2836 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2838 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2842 mtCOVERAGE_TEST_MARKER();
2845 --( pxQueue->uxMessagesWaiting );
2846 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2850 /* Were any co-routines waiting for space to become available? */
2851 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2853 /* In this instance the co-routine could be placed directly
2854 * into the ready list as we are within a critical section.
2855 * Instead the same pending ready list mechanism is used as if
2856 * the event were caused from within an interrupt. */
2857 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2859 xReturn = errQUEUE_YIELD;
2863 mtCOVERAGE_TEST_MARKER();
2868 mtCOVERAGE_TEST_MARKER();
2876 portENABLE_INTERRUPTS();
2878 traceRETURN_xQueueCRReceive( xReturn );
2883 #endif /* configUSE_CO_ROUTINES */
2884 /*-----------------------------------------------------------*/
2886 #if ( configUSE_CO_ROUTINES == 1 )
2888 BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2889 const void * pvItemToQueue,
2890 BaseType_t xCoRoutinePreviouslyWoken )
2892 Queue_t * const pxQueue = xQueue;
2894 traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2896 /* Cannot block within an ISR so if there is no space on the queue then
2897 * exit without doing anything. */
2898 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2900 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2902 /* We only want to wake one co-routine per ISR, so check that a
2903 * co-routine has not already been woken. */
2904 if( xCoRoutinePreviouslyWoken == pdFALSE )
2906 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2908 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2914 mtCOVERAGE_TEST_MARKER();
2919 mtCOVERAGE_TEST_MARKER();
2924 mtCOVERAGE_TEST_MARKER();
2929 mtCOVERAGE_TEST_MARKER();
2932 traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2934 return xCoRoutinePreviouslyWoken;
2937 #endif /* configUSE_CO_ROUTINES */
2938 /*-----------------------------------------------------------*/
2940 #if ( configUSE_CO_ROUTINES == 1 )
2942 BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2944 BaseType_t * pxCoRoutineWoken )
2947 Queue_t * const pxQueue = xQueue;
2949 traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2951 /* We cannot block from an ISR, so check there is data available. If
2952 * not then just leave without doing anything. */
2953 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2955 /* Copy the data from the queue. */
2956 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2958 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2960 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2964 mtCOVERAGE_TEST_MARKER();
2967 --( pxQueue->uxMessagesWaiting );
2968 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2970 if( ( *pxCoRoutineWoken ) == pdFALSE )
2972 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2974 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2976 *pxCoRoutineWoken = pdTRUE;
2980 mtCOVERAGE_TEST_MARKER();
2985 mtCOVERAGE_TEST_MARKER();
2990 mtCOVERAGE_TEST_MARKER();
3000 traceRETURN_xQueueCRReceiveFromISR( xReturn );
3005 #endif /* configUSE_CO_ROUTINES */
3006 /*-----------------------------------------------------------*/
3008 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3010 void vQueueAddToRegistry( QueueHandle_t xQueue,
3011 const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3014 QueueRegistryItem_t * pxEntryToWrite = NULL;
3016 traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3018 configASSERT( xQueue );
3020 if( pcQueueName != NULL )
3022 /* See if there is an empty space in the registry. A NULL name denotes a free slot. */
3024 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3026 /* Replace an existing entry if the queue is already in the registry. */
3027 if( xQueue == xQueueRegistry[ ux ].xHandle )
3029 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3032 /* Otherwise, store in the next empty location */
3033 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3035 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3039 mtCOVERAGE_TEST_MARKER();
3044 if( pxEntryToWrite != NULL )
3046 /* Store the information on this queue. */
3047 pxEntryToWrite->pcQueueName = pcQueueName;
3048 pxEntryToWrite->xHandle = xQueue;
3050 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3053 traceRETURN_vQueueAddToRegistry();
3056 #endif /* configQUEUE_REGISTRY_SIZE */
3057 /*-----------------------------------------------------------*/
3059 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3061 const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3064 const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3066 traceENTER_pcQueueGetName( xQueue );
3068 configASSERT( xQueue );
3070 /* Note there is nothing here to protect against another task adding or
3071 * removing entries from the registry while it is being searched. */
3073 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3075 if( xQueueRegistry[ ux ].xHandle == xQueue )
3077 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3082 mtCOVERAGE_TEST_MARKER();
3086 traceRETURN_pcQueueGetName( pcReturn );
3089 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
3091 #endif /* configQUEUE_REGISTRY_SIZE */
3092 /*-----------------------------------------------------------*/
3094 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3096 void vQueueUnregisterQueue( QueueHandle_t xQueue )
3100 traceENTER_vQueueUnregisterQueue( xQueue );
3102 configASSERT( xQueue );
3104 /* See if the handle of the queue being unregistered is actually in the registry. */
3106 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3108 if( xQueueRegistry[ ux ].xHandle == xQueue )
3110 /* Set the name to NULL to show that this slot is free again. */
3111 xQueueRegistry[ ux ].pcQueueName = NULL;
3113 /* Set the handle to NULL to ensure the same queue handle cannot
3114 * appear in the registry twice if it is added, removed, then added again. */
3116 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3121 mtCOVERAGE_TEST_MARKER();
3125 traceRETURN_vQueueUnregisterQueue();
3126 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
3128 #endif /* configQUEUE_REGISTRY_SIZE */
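/* Illustrative sketch, not part of the kernel source: the registry is a
 * debug aid that lets kernel-aware debuggers display queues by name. The
 * name string is stored by reference, not copied, so it must stay valid
 * while the queue is registered. The function below is hypothetical.
 *
 * void vExampleRegisterQueue( QueueHandle_t xQueue )
 * {
 *     vQueueAddToRegistry( xQueue, "ExampleQueue" ); // String literal, so it
 *                                                    // remains valid.
 *
 *     configASSERT( pcQueueGetName( xQueue ) != NULL );
 *
 *     // Explicit removal; vQueueDelete() also unregisters the queue when
 *     // configQUEUE_REGISTRY_SIZE > 0.
 *     vQueueUnregisterQueue( xQueue );
 * }
 */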
3129 /*-----------------------------------------------------------*/
3131 #if ( configUSE_TIMERS == 1 )
3133 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3134 TickType_t xTicksToWait,
3135 const BaseType_t xWaitIndefinitely )
3137 Queue_t * const pxQueue = xQueue;
3139 traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3141 /* This function should not be called by application code hence the
3142 * 'Restricted' in its name. It is not part of the public API. It is
3143 * designed for use by kernel code, and has special calling requirements.
3144 * It can result in vListInsert() being called on a list that can only
3145 * possibly ever have one item in it, so the list will be fast, but even
3146 * so it should be called with the scheduler locked and not from a critical section. */
3149 /* Only do anything if there are no messages in the queue. This function
3150 * will not actually cause the task to block, just place it on a blocked
3151 * list. It will not block until the scheduler is unlocked - at which
3152 * time a yield will be performed. If an item is added to the queue while
3153 * the queue is locked, and the calling task blocks on the queue, then the
3154 * calling task will be immediately unblocked when the queue is unlocked. */
3155 prvLockQueue( pxQueue );
3157 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3159 /* There is nothing in the queue, block for the specified period. */
3160 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3164 mtCOVERAGE_TEST_MARKER();
3167 prvUnlockQueue( pxQueue );
3169 traceRETURN_vQueueWaitForMessageRestricted();
3172 #endif /* configUSE_TIMERS */
3173 /*-----------------------------------------------------------*/
3175 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3177 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3179 QueueSetHandle_t pxQueue;
3181 traceENTER_xQueueCreateSet( uxEventQueueLength );
3183 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3185 traceRETURN_xQueueCreateSet( pxQueue );
3190 #endif /* configUSE_QUEUE_SETS */
3191 /*-----------------------------------------------------------*/
3193 #if ( configUSE_QUEUE_SETS == 1 )
3195 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3196 QueueSetHandle_t xQueueSet )
3200 traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3202 taskENTER_CRITICAL();
3204 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3206 /* Cannot add a queue/semaphore to more than one queue set. */
3209 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3211 /* Cannot add a queue/semaphore to a queue set if there are already
3212 * items in the queue/semaphore. */
3217 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3221 taskEXIT_CRITICAL();
3223 traceRETURN_xQueueAddToSet( xReturn );
3228 #endif /* configUSE_QUEUE_SETS */
3229 /*-----------------------------------------------------------*/
3231 #if ( configUSE_QUEUE_SETS == 1 )
3233 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3234 QueueSetHandle_t xQueueSet )
3237 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3239 traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3241 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3243 /* The queue was not a member of the set. */
3246 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3248 /* It is dangerous to remove a queue from a set when the queue is
3249 * not empty because the queue set will still hold pending events for the queue. */
3255 taskENTER_CRITICAL();
3257 /* The queue is no longer contained in the set. */
3258 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3260 taskEXIT_CRITICAL();
3264 traceRETURN_xQueueRemoveFromSet( xReturn );
3267 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
3269 #endif /* configUSE_QUEUE_SETS */
3270 /*-----------------------------------------------------------*/
3272 #if ( configUSE_QUEUE_SETS == 1 )
3274 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3275 TickType_t const xTicksToWait )
3277 QueueSetMemberHandle_t xReturn = NULL;
3279 traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3281 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
3283 traceRETURN_xQueueSelectFromSet( xReturn );
3288 #endif /* configUSE_QUEUE_SETS */
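/* Illustrative sketch, not part of the kernel source: a queue set lets one
 * task block on several queues and semaphores at once. Members must be
 * empty when added, and the set length must cover the combined lengths of
 * its members. All names are hypothetical.
 *
 * void vExampleQueueSetTask( void * pvParameters )
 * {
 *     QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *     SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary();
 *     QueueSetHandle_t xSet = xQueueCreateSet( 4 + 1 ); // Sum of member lengths.
 *     QueueSetMemberHandle_t xActivated;
 *
 *     ( void ) pvParameters;
 *     ( void ) xQueueAddToSet( xQueue, xSet );
 *     ( void ) xQueueAddToSet( xSemaphore, xSet );
 *
 *     for( ; ; )
 *     {
 *         // Blocks until one member contains data, then returns that member.
 *         xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *
 *         if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
 *         {
 *             uint32_t ulValue;
 *             ( void ) xQueueReceive( xQueue, &ulValue, 0 );
 *         }
 *         else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
 *         {
 *             ( void ) xSemaphoreTake( xSemaphore, 0 );
 *         }
 *     }
 * }
 */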
3289 /*-----------------------------------------------------------*/
3291 #if ( configUSE_QUEUE_SETS == 1 )
3293 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3295 QueueSetMemberHandle_t xReturn = NULL;
3297 traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3299 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
3301 traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3306 #endif /* configUSE_QUEUE_SETS */
3307 /*-----------------------------------------------------------*/
3309 #if ( configUSE_QUEUE_SETS == 1 )
3311 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3313 Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3314 BaseType_t xReturn = pdFALSE;
3316 /* This function must be called from a critical section. */
3318 /* The following line is not reachable in unit tests because every call
3319 * to prvNotifyQueueSetContainer is preceded by a check that
3320 * pxQueueSetContainer != NULL */
3321 configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3322 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3324 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3326 const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3328 traceQUEUE_SET_SEND( pxQueueSetContainer );
3330 /* The data copied is the handle of the queue that contains data. */
3331 xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3333 if( cTxLock == queueUNLOCKED )
3335 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3337 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3339 /* The task waiting has a higher priority. */
3344 mtCOVERAGE_TEST_MARKER();
3349 mtCOVERAGE_TEST_MARKER();
3354 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3359 mtCOVERAGE_TEST_MARKER();
3365 #endif /* configUSE_QUEUE_SETS */