/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area.  When the
 * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
 * not necessary, and the pcHead pointer is set to NULL to indicate that the
 * structure instead holds a pointer to the mutex holder (if any).  Map alternative
 * names to the pcHead and pcTail structure members to ensure the readability of
 * the code is maintained.  The QueuePointers_t and SemaphoreData_t types are used
 * to form a union as their usage is mutually exclusive dependent on what the
 * queue is being used for. */
#define uxQueueType               pcHead
#define queueQUEUE_IS_MUTEX       NULL
typedef struct QueuePointers
{
    int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #if ( configNUMBER_OF_CORES == 1 )
        #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* configUSE_PREEMPTION */
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    int8_t * pcHead;    /**< Points to the beginning of the queue storage area. */
    int8_t * pcWriteTo; /**< Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
    } u;

    List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
    UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /**< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition * pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif
} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
 * name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

/* The type stored within the queue registry array.  This allows a name
 * to be assigned to each queue making kernel aware debugging a little
 * more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
 * new QueueRegistryItem_t name below to enable the use of older kernel aware
 * debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
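
/* Illustrative usage sketch (not part of the kernel source): an application
 * can make a queue visible to a kernel aware debugger by registering it with
 * vQueueAddToRegistry().  xAppQueue is a hypothetical application handle;
 * assumes configQUEUE_REGISTRY_SIZE > 0 in FreeRTOSConfig.h.
 *
 *  QueueHandle_t xAppQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *
 *  if( xAppQueue != NULL )
 *  {
 *      vQueueAddToRegistry( xAppQueue, "AppQueue" );
 *  }
 */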
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )

/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if ( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if ( configUSE_MUTEXES == 1 )

/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex.  This function returns
 * that priority.
 */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

/*
 * Macro to increment cTxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )

/*
 * Macro to increment cRxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                               BaseType_t xNewQueue )
{
    BaseType_t xReturn = pdPASS;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericReset( xQueue, xNewQueue );

    configASSERT( pxQueue );

    if( ( pxQueue != NULL ) &&
        ( pxQueue->uxLength >= 1U ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
    {
        taskENTER_CRITICAL();
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxQueue->pcWriteTo = pxQueue->pcHead;
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;

            if( xNewQueue == pdFALSE )
            {
                /* If there are tasks blocked waiting to read from the queue, then
                 * the tasks will remain blocked as after this function exits the queue
                 * will still be empty.  If there are tasks blocked waiting to write to
                 * the queue, then one should be unblocked as after this function exits
                 * it will be possible to write to it. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Ensure the event queues start in the correct state. */
                vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
                vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
            }
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        xReturn = pdFAIL;
    }

    configASSERT( xReturn != pdFAIL );

    /* A value is returned for calling semantic consistency with previous
     * versions. */
    traceRETURN_xQueueGenericReset( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
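
/* Illustrative usage sketch (not part of the kernel source): applications
 * normally call xQueueGenericReset() through the xQueueReset() macro defined
 * in queue.h.  xAppQueue is a hypothetical handle to an existing queue.
 *
 *  // Discard any data still held in the queue.  The call only fails (and
 *  // asserts) if the queue structure itself is invalid.
 *  ( void ) xQueueReset( xAppQueue );
 */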
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
                                             const UBaseType_t uxItemSize,
                                             uint8_t * pucQueueStorage,
                                             StaticQueue_t * pxStaticQueue,
                                             const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;

        traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );

        /* The StaticQueue_t structure and the queue storage area must be
         * supplied. */
        configASSERT( pxStaticQueue );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            ( pxStaticQueue != NULL ) &&

            /* A queue storage area should be provided if the item size is not 0, and
             * should not be provided if the item size is 0. */
            ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
            ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
        {
            #if ( configASSERT_DEFINED == 1 )
            {
                /* Sanity check that the size of the structure used to declare a
                 * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
                 * the real queue and semaphore structures. */
                volatile size_t xSize = sizeof( StaticQueue_t );

                /* This assertion cannot be branch covered in unit tests */
                configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
                ( void ) xSize;                             /* Keeps lint quiet when configASSERT() is not defined. */
            }
            #endif /* configASSERT_DEFINED */

            /* The address of a statically allocated queue was passed in, use it.
             * The address of a statically allocated storage area was also passed in
             * but is already set. */
            /* MISRA Ref 11.3.1 [Misaligned access] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
            /* coverity[misra_c_2012_rule_11_3_violation] */
            pxNewQueue = ( Queue_t * ) pxStaticQueue;

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                 * note this queue was allocated statically in case the queue is
                 * later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreateStatic( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
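
/* Illustrative usage sketch (not part of the kernel source): creating a queue
 * without any heap use via xQueueCreateStatic(), the queue.h wrapper for
 * xQueueGenericCreateStatic().  The buffer names are hypothetical.
 *
 *  #define QUEUE_LENGTH    10
 *  #define ITEM_SIZE       sizeof( uint32_t )
 *
 *  static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
 *  static StaticQueue_t xQueueBuffer;
 *
 *  QueueHandle_t xQueue = xQueueCreateStatic( QUEUE_LENGTH,
 *                                             ITEM_SIZE,
 *                                             ucQueueStorage,
 *                                             &xQueueBuffer );
 */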
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
                                              uint8_t ** ppucQueueStorage,
                                              StaticQueue_t ** ppxStaticQueue )
    {
        BaseType_t xReturn;
        Queue_t * const pxQueue = xQueue;

        traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );

        configASSERT( pxQueue );
        configASSERT( ppxStaticQueue );

        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        {
            /* Check if the queue was statically allocated. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
            {
                if( ppucQueueStorage != NULL )
                {
                    *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
                }

                /* MISRA Ref 11.3.1 [Misaligned access] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
                /* coverity[misra_c_2012_rule_11_3_violation] */
                *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* configSUPPORT_DYNAMIC_ALLOCATION */
        {
            /* Queue must have been statically allocated. */
            if( ppucQueueStorage != NULL )
            {
                *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
            }

            *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
            xReturn = pdTRUE;
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        traceRETURN_xQueueGenericGetStaticBuffers( xReturn );

        return xReturn;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
                                       const UBaseType_t uxItemSize,
                                       const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;
        size_t xQueueSizeInBytes;
        uint8_t * pucQueueStorage;

        traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            /* Check for multiplication overflow. */
            ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
            /* Check for addition overflow. */
            ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
        {
            /* Allocate enough space to hold the maximum number of items that
             * can be in the queue at any time.  It is valid for uxItemSize to be
             * zero in the case the queue is used as a semaphore. */
            xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );

            /* Allocate the queue and storage area.  Justification for MISRA
             * deviation as follows:  pvPortMalloc() always ensures returned memory
             * blocks are aligned per the requirements of the MCU stack.  In this case
             * pvPortMalloc() must return a pointer that is guaranteed to meet the
             * alignment requirements of the Queue_t structure - which in this case
             * is an int8_t *.  Therefore, whenever the stack alignment requirements
             * are greater than or equal to the pointer to char requirements the cast
             * is safe.  In other cases alignment requirements are not strict (one or
             * two bytes). */
            pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

            if( pxNewQueue != NULL )
            {
                /* Jump past the queue structure to find the location of the queue
                 * storage area. */
                pucQueueStorage = ( uint8_t * ) pxNewQueue;
                pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

                #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                {
                    /* Queues can be created either statically or dynamically, so
                     * note this queue was created dynamically in case it is later
                     * deleted. */
                    pxNewQueue->ucStaticallyAllocated = pdFALSE;
                }
                #endif /* configSUPPORT_STATIC_ALLOCATION */

                prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
            }
            else
            {
                traceQUEUE_CREATE_FAILED( ucQueueType );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreate( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
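
/* Illustrative usage sketch (not part of the kernel source): xQueueCreate()
 * is the queue.h wrapper for xQueueGenericCreate().  Requires
 * configSUPPORT_DYNAMIC_ALLOCATION == 1.
 *
 *  QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *  if( xQueue == NULL )
 *  {
 *      // pvPortMalloc() could not provide sizeof( Queue_t ) plus the
 *      // storage area, or a parameter failed the overflow checks.
 *  }
 */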
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
     * configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
         * be set to NULL because NULL is used as a key to say the queue is used as
         * a mutex.  Therefore just set pcHead to point to the queue as a benign
         * value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
     * defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if ( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t * pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
             * correctly for a generic queue, but this function is creating a
             * mutex.  Overwrite those members that need to be set differently -
             * in particular the information required for priority inheritance. */
            pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutex( ucQueueType );

        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutex( xNewQueue );

        return xNewQueue;
    }

#endif /* ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
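
/* Illustrative usage sketch (not part of the kernel source): applications
 * create mutexes through the xSemaphoreCreateMutex() macro in semphr.h, which
 * calls xQueueCreateMutex() with the appropriate queue type.
 *
 *  SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *  if( xMutex != NULL )
 *  {
 *      // The mutex was created holding one token, ready to be taken.
 *  }
 */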
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
                                           StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );

        /* Prevent compiler warnings about unused parameters if
         * configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutexStatic( xNewQueue );

        return xNewQueue;
    }

#endif /* ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;
        Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

        traceENTER_xQueueGetMutexHolder( xSemaphore );

        configASSERT( xSemaphore );

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
         * be called directly.  Note:  This is a good way of determining if the
         * calling task is the mutex holder, but not a good way of determining the
         * identity of the mutex holder, as the holder may change between the
         * following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xQueueGetMutexHolder( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
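
/* Illustrative usage sketch (not part of the kernel source): as the comment
 * above notes, the holder query is only reliable for asking "am I the
 * holder?".  xMutex is a hypothetical mutex handle.
 *
 *  if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
 *  {
 *      // Safe conclusion: the calling task holds xMutex.
 *  }
 */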
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;

        traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
         * holder should not change in an ISR, and therefore a critical section is
         * not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueGiveMutexRecursive( xMutex );

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then xMutexHolder will not
         * change outside of this task.  If this task does not hold the mutex then
         * pxMutexHolder can never coincidentally equal the task's handle, and as
         * this is the only condition we are interested in it does not matter if
         * pxMutexHolder is accessed simultaneously by another task.  Therefore no
         * mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
             * the task handle, therefore no underflow check is required.  Also,
             * uxRecursiveCallCount is only modified by the mutex holder, and as
             * there can only be one, no mutual exclusion is required to modify the
             * uxRecursiveCallCount member. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                 * task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
             * holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        traceRETURN_xQueueGiveMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                         TickType_t xTicksToWait )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
         * xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
             * obtained.  The calling task may have entered the Blocked state
             * before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        traceRETURN_xQueueTakeMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
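
/* Illustrative usage sketch (not part of the kernel source): recursive takes
 * and gives must balance - the mutex is only returned to the queue when the
 * call count unwinds to zero.  xRecursiveMutex is a hypothetical handle
 * created with xSemaphoreCreateRecursiveMutex().
 *
 *  if( xSemaphoreTakeRecursive( xRecursiveMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *  {
 *      // A nested take by the holding task succeeds immediately.
 *      ( void ) xSemaphoreTakeRecursive( xRecursiveMutex, 0 );
 *
 *      // Each take needs a matching give.
 *      ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
 *      ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
 *  }
 */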
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
                                                       const UBaseType_t uxInitialCount,
                                                       StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                                 const UBaseType_t uxInitialCount )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphore( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
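
/* Illustrative usage sketch (not part of the kernel source): applications use
 * the xSemaphoreCreateCounting() macro in semphr.h, which wraps
 * xQueueCreateCountingSemaphore().  A max count of 5 and an initial count of
 * 0 models, for example, up to five latched-but-unprocessed events.
 *
 *  SemaphoreHandle_t xCountingSemaphore = xSemaphoreCreateCounting( 5, 0 );
 */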
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to
                                 * do this from within the critical section - the
                                 * kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes
                             * and the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                     * queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                             * our own so yield immediately.  Yes it is ok to do
                             * this from within the critical section - the kernel
                             * takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                         * executed if the task was holding multiple mutexes and
                         * the mutexes were given back in an order that is
                         * different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                traceRETURN_xQueueGenericSend( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    traceRETURN_xQueueGenericSend( errQUEUE_FULL );

                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list.  It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            traceRETURN_xQueueGenericSend( errQUEUE_FULL );

            return errQUEUE_FULL;
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
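
/* Illustrative usage sketch (not part of the kernel source): xQueueSend(),
 * xQueueSendToBack() and xQueueSendToFront() in queue.h all resolve to
 * xQueueGenericSend() with the appropriate copy position.  xAppQueue is a
 * hypothetical handle.
 *
 *  uint32_t ulValue = 42;
 *
 *  // Copy ulValue into the queue, waiting up to 100 ms for space.
 *  if( xQueueSend( xAppQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *  {
 *      // errQUEUE_FULL: the queue stayed full for the whole block time.
 *  }
 */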
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue.  Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex.  That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    ( void ) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGenericSendFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
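
/* Illustrative usage sketch (not part of the kernel source): the FromISR
 * variant never blocks; instead it reports through pxHigherPriorityTaskWoken
 * whether the post unblocked a task that should run before the interrupted
 * code.  The handler name and xAppQueue are hypothetical.
 *
 *  void vExampleISR( void )
 *  {
 *      uint32_t ulValue = 42;
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      ( void ) xQueueSendFromISR( xAppQueue, &ulValue, &xHigherPriorityTaskWoken );
 *
 *      // Request a context switch on interrupt exit if a higher priority
 *      // task was woken by the post.
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */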
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0.  Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupt, only for tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR.  As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed.  Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGiveFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
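
/* Illustrative usage sketch (not part of the kernel source):
 * xSemaphoreGiveFromISR() in semphr.h wraps xQueueGiveFromISR() - the classic
 * deferred interrupt handling pattern.  The handler name and
 * xBinarySemaphore are hypothetical.
 *
 *  void vTimerISR( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      // Signal the handler task that the interrupt occurred.
 *      ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */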
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue?  If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                traceRETURN_xQueueReceive( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );

                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
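
/* Illustrative usage sketch (not part of the kernel source): a consumer task
 * built around xQueueReceive().  The task name and xAppQueue are hypothetical.
 *
 *  void vConsumerTask( void * pvParameters )
 *  {
 *      uint32_t ulReceived;
 *
 *      ( void ) pvParameters;
 *
 *      for( ; ; )
 *      {
 *          // portMAX_DELAY blocks indefinitely when INCLUDE_vTaskSuspend == 1.
 *          if( xQueueReceive( xAppQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
 *          {
 *              // Process ulReceived here.
 *          }
 *      }
 *  }
 */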
1655 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1656 TickType_t xTicksToWait )
1658 BaseType_t xEntryTimeSet = pdFALSE;
1660 Queue_t * const pxQueue = xQueue;
1662 #if ( configUSE_MUTEXES == 1 )
1663 BaseType_t xInheritanceOccurred = pdFALSE;
1666 traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1668 /* Check the queue pointer is not NULL. */
1669 configASSERT( ( pxQueue ) );
1671 /* Check this really is a semaphore, in which case the item size will be
1673 configASSERT( pxQueue->uxItemSize == 0 );
1675 /* Cannot block if the scheduler is suspended. */
1676 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1678 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1682 /*lint -save -e904 This function relaxes the coding standard somewhat to allow return
1683 * statements within the function itself. This is done in the interest
1684 * of execution time efficiency. */
1687 taskENTER_CRITICAL();
1689 /* Semaphores are queues with an item size of 0, and where the
1690 * number of messages in the queue is the semaphore's count value. */
1691 const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1693 /* Is there data in the queue now? To be running the calling task
1694 * must be the highest priority task wanting to access the queue. */
1695 if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1697 traceQUEUE_RECEIVE( pxQueue );
1699 /* Semaphores are queues with a data size of zero and where the
1700 * messages waiting is the semaphore's count. Reduce the count. */
1701 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1703 #if ( configUSE_MUTEXES == 1 )
1705 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1707 /* Record the information required to implement
1708 * priority inheritance should it become necessary. */
1709 pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1713 mtCOVERAGE_TEST_MARKER();
1716 #endif /* configUSE_MUTEXES */
1718 /* Check to see if other tasks are blocked waiting to give the
1719 * semaphore, and if so, unblock the highest priority such task. */
1720 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1722 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1724 queueYIELD_IF_USING_PREEMPTION();
1728 mtCOVERAGE_TEST_MARKER();
1733 mtCOVERAGE_TEST_MARKER();
1736 taskEXIT_CRITICAL();
1738 traceRETURN_xQueueSemaphoreTake( pdPASS );
1744 if( xTicksToWait == ( TickType_t ) 0 )
1746 /* The semaphore count was 0 and no block time is specified
1747 * (or the block time has expired) so exit now. */
1748 taskEXIT_CRITICAL();
1750 traceQUEUE_RECEIVE_FAILED( pxQueue );
1751 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1753 return errQUEUE_EMPTY;
1755 else if( xEntryTimeSet == pdFALSE )
1757 /* The semaphore count was 0 and a block time was specified
1758 * so configure the timeout structure ready to block. */
1759 vTaskInternalSetTimeOutState( &xTimeOut );
1760 xEntryTimeSet = pdTRUE;
1764 /* Entry time was already set. */
1765 mtCOVERAGE_TEST_MARKER();
1769 taskEXIT_CRITICAL();
1771 /* Interrupts and other tasks can give to and take from the semaphore
1772 * now the critical section has been exited. */
1775 prvLockQueue( pxQueue );
1777 /* Update the timeout state to see if it has expired yet. */
1778 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1780 /* A block time is specified and not expired. If the semaphore
1781 * count is 0 then enter the Blocked state to wait for a semaphore to
1782 * become available. As semaphores are implemented with queues the
1783 * queue being empty is equivalent to the semaphore count being 0. */
1784 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1786 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1788 #if ( configUSE_MUTEXES == 1 )
1790 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1792 taskENTER_CRITICAL();
1794 xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1796 taskEXIT_CRITICAL();
1800 mtCOVERAGE_TEST_MARKER();
1803 #endif /* if ( configUSE_MUTEXES == 1 ) */
1805 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1806 prvUnlockQueue( pxQueue );
1808 if( xTaskResumeAll() == pdFALSE )
1810 taskYIELD_WITHIN_API();
1814 mtCOVERAGE_TEST_MARKER();
1819 /* There was no timeout and the semaphore count was not 0, so
1820 * attempt to take the semaphore again. */
1821 prvUnlockQueue( pxQueue );
1822 ( void ) xTaskResumeAll();
1828 prvUnlockQueue( pxQueue );
1829 ( void ) xTaskResumeAll();
1831 /* If the semaphore count is 0 exit now as the timeout has
1832 * expired. Otherwise return to attempt to take the semaphore that is
1833 * known to be available. As semaphores are implemented by queues the
1834 * queue being empty is equivalent to the semaphore count being 0. */
1835 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1837 #if ( configUSE_MUTEXES == 1 )
1839 /* xInheritanceOccurred could only have been set if
1840 * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1841 * test the mutex type again to check it is actually a mutex. */
1842 if( xInheritanceOccurred != pdFALSE )
1844 taskENTER_CRITICAL();
1846 UBaseType_t uxHighestWaitingPriority;
1848 /* This task blocking on the mutex caused another
1849 * task to inherit this task's priority. Now this task
1850 * has timed out the priority should be disinherited
1851 * again, but only as low as the next highest priority
1852 * task that is waiting for the same mutex. */
1853 uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1854 vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1856 taskEXIT_CRITICAL();
1859 #endif /* configUSE_MUTEXES */
1861 traceQUEUE_RECEIVE_FAILED( pxQueue );
1862 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1864 return errQUEUE_EMPTY;
1868 mtCOVERAGE_TEST_MARKER();
1871 } /*lint -restore */
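/* Example usage (an illustrative, application-side sketch, not kernel code;
 * the handle name and timeout below are hypothetical):
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *     // Block for up to 100ms. If the holder has a lower priority it
 *     // temporarily inherits this task's priority while we wait.
 *     if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
 *     {
 *         // ... access the resource guarded by the mutex ...
 *         ( void ) xSemaphoreGive( xMutex );
 *     }
 *     else
 *     {
 *         // Timed out. Any priority inheritance has already been unwound
 *         // by the disinherit-after-timeout path above.
 *     }
 */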
1873 /*-----------------------------------------------------------*/
1875 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1876 void * const pvBuffer,
1877 TickType_t xTicksToWait )
1879 BaseType_t xEntryTimeSet = pdFALSE;
1881 int8_t * pcOriginalReadPosition;
1882 Queue_t * const pxQueue = xQueue;
1884 traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1886 /* Check the pointer is not NULL. */
1887 configASSERT( ( pxQueue ) );
1889 /* The buffer into which data is received can only be NULL if the data size
1890 * is zero (so no data is copied into the buffer). */
1891 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1893 /* Cannot block if the scheduler is suspended. */
1894 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1896 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1900 /*lint -save -e904 This function relaxes the coding standard somewhat to
1901 * allow return statements within the function itself. This is done in the
1902 * interest of execution time efficiency. */
1905 taskENTER_CRITICAL();
1907 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1909 /* Is there data in the queue now? To be running the calling task
1910 * must be the highest priority task wanting to access the queue. */
1911 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1913 /* Remember the read position so it can be reset after the data
1914 * is read from the queue as this function is only peeking the
1915 * data, not removing it. */
1916 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1918 prvCopyDataFromQueue( pxQueue, pvBuffer );
1919 traceQUEUE_PEEK( pxQueue );
1921 /* The data is not being removed, so reset the read pointer. */
1922 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1924 /* The data is being left in the queue, so see if there are
1925 * any other tasks waiting for the data. */
1926 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1928 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1930 /* The task waiting has a higher priority than this task. */
1931 queueYIELD_IF_USING_PREEMPTION();
1935 mtCOVERAGE_TEST_MARKER();
1940 mtCOVERAGE_TEST_MARKER();
1943 taskEXIT_CRITICAL();
1945 traceRETURN_xQueuePeek( pdPASS );
1951 if( xTicksToWait == ( TickType_t ) 0 )
1953 /* The queue was empty and no block time is specified (or
1954 * the block time has expired) so leave now. */
1955 taskEXIT_CRITICAL();
1957 traceQUEUE_PEEK_FAILED( pxQueue );
1958 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1960 return errQUEUE_EMPTY;
1962 else if( xEntryTimeSet == pdFALSE )
1964 /* The queue was empty and a block time was specified so
1965 * configure the timeout structure ready to enter the blocked state. */
1967 vTaskInternalSetTimeOutState( &xTimeOut );
1968 xEntryTimeSet = pdTRUE;
1972 /* Entry time was already set. */
1973 mtCOVERAGE_TEST_MARKER();
1977 taskEXIT_CRITICAL();
1979 /* Interrupts and other tasks can send to and receive from the queue
1980 * now that the critical section has been exited. */
1983 prvLockQueue( pxQueue );
1985 /* Update the timeout state to see if it has expired yet. */
1986 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1988 /* Timeout has not expired yet, check to see if there is data in the
1989 * queue now, and if not enter the Blocked state to wait for data. */
1990 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1992 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1993 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1994 prvUnlockQueue( pxQueue );
1996 if( xTaskResumeAll() == pdFALSE )
1998 taskYIELD_WITHIN_API();
2002 mtCOVERAGE_TEST_MARKER();
2007 /* There is data in the queue now, so don't enter the blocked
2008 * state, instead return to try and obtain the data. */
2009 prvUnlockQueue( pxQueue );
2010 ( void ) xTaskResumeAll();
2015 /* The timeout has expired. If there is still no data in the queue
2016 * exit, otherwise go back and try to read the data again. */
2017 prvUnlockQueue( pxQueue );
2018 ( void ) xTaskResumeAll();
2020 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2022 traceQUEUE_PEEK_FAILED( pxQueue );
2023 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2025 return errQUEUE_EMPTY;
2029 mtCOVERAGE_TEST_MARKER();
2032 } /*lint -restore */
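/* Example usage (illustrative sketch; the item type and queue handle are
 * hypothetical):
 *
 *     uint32_t ulMessage;
 *
 *     // Copy the item at the head of the queue without removing it. The
 *     // same item is still returned by a later xQueueReceive() call.
 *     if( xQueuePeek( xQueue, &ulMessage, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *     {
 *         // ulMessage now holds a copy of the head item.
 *     }
 */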
2034 /*-----------------------------------------------------------*/
2036 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2037 void * const pvBuffer,
2038 BaseType_t * const pxHigherPriorityTaskWoken )
2041 UBaseType_t uxSavedInterruptStatus;
2042 Queue_t * const pxQueue = xQueue;
2044 traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2046 configASSERT( pxQueue );
2047 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2049 /* RTOS ports that support interrupt nesting have the concept of a maximum
2050 * system call (or maximum API call) interrupt priority. Interrupts that are
2051 * above the maximum system call priority are kept permanently enabled, even
2052 * when the RTOS kernel is in a critical section, but cannot make any calls to
2053 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2054 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2055 * failure if a FreeRTOS API function is called from an interrupt that has been
2056 * assigned a priority above the configured maximum system call priority.
2057 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2058 * that have been assigned a priority at or (logically) below the maximum
2059 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2060 * safe API to ensure interrupt entry is as fast and as simple as possible.
2061 * More information (albeit Cortex-M specific) is provided on the following
2062 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2063 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2065 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2067 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2069 /* Cannot block in an ISR, so check there is data available. */
2070 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2072 const int8_t cRxLock = pxQueue->cRxLock;
2074 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2076 prvCopyDataFromQueue( pxQueue, pvBuffer );
2077 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2079 /* If the queue is locked the event list will not be modified.
2080 * Instead update the lock count so the task that unlocks the queue
2081 * will know that an ISR has removed data while the queue was locked. */
2083 if( cRxLock == queueUNLOCKED )
2085 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2087 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2089 /* The task waiting has a higher priority than us so
2090 * force a context switch. */
2091 if( pxHigherPriorityTaskWoken != NULL )
2093 *pxHigherPriorityTaskWoken = pdTRUE;
2097 mtCOVERAGE_TEST_MARKER();
2102 mtCOVERAGE_TEST_MARKER();
2107 mtCOVERAGE_TEST_MARKER();
2112 /* Increment the lock count so the task that unlocks the queue
2113 * knows that data was removed while it was locked. */
2114 prvIncrementQueueRxLock( pxQueue, cRxLock );
2122 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2125 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2127 traceRETURN_xQueueReceiveFromISR( xReturn );
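/* Example usage (illustrative sketch; the handler and queue names are
 * hypothetical, and portYIELD_FROM_ISR() is assumed to exist on the port):
 *
 *     void vAnInterruptHandler( void )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         char cItem;
 *
 *         while( xQueueReceiveFromISR( xQueue, &cItem, &xHigherPriorityTaskWoken ) == pdPASS )
 *         {
 *             // Process cItem here.
 *         }
 *
 *         // Request a context switch on exit if draining the queue
 *         // unblocked a task of higher priority than the one interrupted.
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */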
2131 /*-----------------------------------------------------------*/
2133 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2134 void * const pvBuffer )
2137 UBaseType_t uxSavedInterruptStatus;
2138 int8_t * pcOriginalReadPosition;
2139 Queue_t * const pxQueue = xQueue;
2141 traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2143 configASSERT( pxQueue );
2144 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2145 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2147 /* RTOS ports that support interrupt nesting have the concept of a maximum
2148 * system call (or maximum API call) interrupt priority. Interrupts that are
2149 * above the maximum system call priority are kept permanently enabled, even
2150 * when the RTOS kernel is in a critical section, but cannot make any calls to
2151 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2152 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2153 * failure if a FreeRTOS API function is called from an interrupt that has been
2154 * assigned a priority above the configured maximum system call priority.
2155 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2156 * that have been assigned a priority at or (logically) below the maximum
2157 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2158 * safe API to ensure interrupt entry is as fast and as simple as possible.
2159 * More information (albeit Cortex-M specific) is provided on the following
2160 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2161 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2163 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2165 /* Cannot block in an ISR, so check there is data available. */
2166 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2168 traceQUEUE_PEEK_FROM_ISR( pxQueue );
2170 /* Remember the read position so it can be reset as nothing is
2171 * actually being removed from the queue. */
2172 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2173 prvCopyDataFromQueue( pxQueue, pvBuffer );
2174 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2181 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2184 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2186 traceRETURN_xQueuePeekFromISR( xReturn );
2190 /*-----------------------------------------------------------*/
2192 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2194 UBaseType_t uxReturn;
2196 traceENTER_uxQueueMessagesWaiting( xQueue );
2198 configASSERT( xQueue );
2200 taskENTER_CRITICAL();
2202 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2204 taskEXIT_CRITICAL();
2206 traceRETURN_uxQueueMessagesWaiting( uxReturn );
2209 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2210 /*-----------------------------------------------------------*/
2212 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2214 UBaseType_t uxReturn;
2215 Queue_t * const pxQueue = xQueue;
2217 traceENTER_uxQueueSpacesAvailable( xQueue );
2219 configASSERT( pxQueue );
2221 taskENTER_CRITICAL();
2223 uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2225 taskEXIT_CRITICAL();
2227 traceRETURN_uxQueueSpacesAvailable( uxReturn );
2230 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
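/* For any queue the two queries above satisfy the invariant
 * uxQueueMessagesWaiting() + uxQueueSpacesAvailable() == queue length.
 * Illustrative sketch (hypothetical 8-item queue):
 *
 *     QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *     uint32_t ulValue = 0;
 *
 *     ( void ) xQueueSend( xQueue, &ulValue, 0 );
 *
 *     // uxQueueMessagesWaiting( xQueue ) now returns 1, and
 *     // uxQueueSpacesAvailable( xQueue ) returns 7.
 */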
2231 /*-----------------------------------------------------------*/
2233 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2235 UBaseType_t uxReturn;
2236 Queue_t * const pxQueue = xQueue;
2238 traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2240 configASSERT( pxQueue );
2241 uxReturn = pxQueue->uxMessagesWaiting;
2243 traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2246 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2247 /*-----------------------------------------------------------*/
2249 void vQueueDelete( QueueHandle_t xQueue )
2251 Queue_t * const pxQueue = xQueue;
2253 traceENTER_vQueueDelete( xQueue );
2255 configASSERT( pxQueue );
2256 traceQUEUE_DELETE( pxQueue );
2258 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2260 vQueueUnregisterQueue( pxQueue );
2264 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2266 /* The queue can only have been allocated dynamically - free it then. */
2268 vPortFree( pxQueue );
2270 #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2272 /* The queue could have been allocated statically or dynamically, so
2273 * check before attempting to free the memory. */
2274 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2276 vPortFree( pxQueue );
2280 mtCOVERAGE_TEST_MARKER();
2283 #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2285 /* The queue must have been statically allocated, so is not going to be
2286 * deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
2289 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2291 traceRETURN_vQueueDelete();
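/* Example usage (illustrative sketch): deleting a dynamically allocated
 * queue removes its registry entry (when the registry is enabled) and frees
 * its storage. A statically allocated queue is only unregistered - the
 * application still owns its memory:
 *
 *     QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 *     // ... use the queue ...
 *
 *     vQueueDelete( xQueue ); // xQueue must not be used after this call.
 */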
2293 /*-----------------------------------------------------------*/
2295 #if ( configUSE_TRACE_FACILITY == 1 )
2297 UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2299 traceENTER_uxQueueGetQueueNumber( xQueue );
2301 traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2303 return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2306 #endif /* configUSE_TRACE_FACILITY */
2307 /*-----------------------------------------------------------*/
2309 #if ( configUSE_TRACE_FACILITY == 1 )
2311 void vQueueSetQueueNumber( QueueHandle_t xQueue,
2312 UBaseType_t uxQueueNumber )
2314 traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2316 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2318 traceRETURN_vQueueSetQueueNumber();
2321 #endif /* configUSE_TRACE_FACILITY */
2322 /*-----------------------------------------------------------*/
2324 #if ( configUSE_TRACE_FACILITY == 1 )
2326 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2328 traceENTER_ucQueueGetQueueType( xQueue );
2330 traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2332 return ( ( Queue_t * ) xQueue )->ucQueueType;
2335 #endif /* configUSE_TRACE_FACILITY */
2336 /*-----------------------------------------------------------*/
2338 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2340 traceENTER_uxQueueGetQueueItemSize( xQueue );
2342 traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2344 return ( ( Queue_t * ) xQueue )->uxItemSize;
2346 /*-----------------------------------------------------------*/
2348 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2350 traceENTER_uxQueueGetQueueLength( xQueue );
2352 traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2354 return ( ( Queue_t * ) xQueue )->uxLength;
2356 /*-----------------------------------------------------------*/
2358 #if ( configUSE_MUTEXES == 1 )
2360 static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2362 UBaseType_t uxHighestPriorityOfWaitingTasks;
2364 /* If a task waiting for a mutex causes the mutex holder to inherit a
2365 * priority, but the waiting task times out, then the holder should
2366 * disinherit the priority - but only down to the highest priority of any
2367 * other tasks that are waiting for the same mutex. For this purpose,
2368 * return the priority of the highest priority task that is waiting for the same mutex. */
2370 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2372 uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2376 uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2379 return uxHighestPriorityOfWaitingTasks;
2382 #endif /* configUSE_MUTEXES */
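/* Worked example of the encoding unpacked above: event list items are kept
 * sorted by ( configMAX_PRIORITIES - task priority ), so the head of
 * xTasksWaitingToReceive always holds the smallest item value and hence the
 * highest waiting priority. With configMAX_PRIORITIES set to 8, a waiting
 * task of priority 5 stores an item value of 3, and the subtraction above
 * recovers 8 - 3 = 5 as the priority to disinherit down to. */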
2383 /*-----------------------------------------------------------*/
2385 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2386 const void * pvItemToQueue,
2387 const BaseType_t xPosition )
2389 BaseType_t xReturn = pdFALSE;
2390 UBaseType_t uxMessagesWaiting;
2392 /* This function is called from a critical section. */
2394 uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2396 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2398 #if ( configUSE_MUTEXES == 1 )
2400 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2402 /* The mutex is no longer being held. */
2403 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2404 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2408 mtCOVERAGE_TEST_MARKER();
2411 #endif /* configUSE_MUTEXES */
2413 else if( xPosition == queueSEND_TO_BACK )
2415 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2416 pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2418 if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2420 pxQueue->pcWriteTo = pxQueue->pcHead;
2424 mtCOVERAGE_TEST_MARKER();
2429 ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
2430 pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2432 if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2434 pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2438 mtCOVERAGE_TEST_MARKER();
2441 if( xPosition == queueOVERWRITE )
2443 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2445 /* An item is not being added but overwritten, so subtract
2446 * one from the recorded number of items in the queue so when
2447 * one is added again below the number of recorded items remains correct. */
2449 --uxMessagesWaiting;
2453 mtCOVERAGE_TEST_MARKER();
2458 mtCOVERAGE_TEST_MARKER();
2462 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
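/* Example usage (illustrative sketch): queueOVERWRITE is intended for
 * queues of length one that act as a mailbox holding the latest value:
 *
 *     QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
 *     uint32_t ulValue = 10;
 *
 *     ( void ) xQueueOverwrite( xMailbox, &ulValue ); // Mailbox holds 10.
 *
 *     ulValue = 20;
 *     ( void ) xQueueOverwrite( xMailbox, &ulValue ); // Still one item,
 *                                                     // now holding 20.
 */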
2466 /*-----------------------------------------------------------*/
2468 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2469 void * const pvBuffer )
2471 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2473 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2475 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
2477 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2481 mtCOVERAGE_TEST_MARKER();
2484 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2487 /*-----------------------------------------------------------*/
2489 static void prvUnlockQueue( Queue_t * const pxQueue )
2491 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2493 /* The lock counts contain the number of extra data items placed or
2494 * removed from the queue while the queue was locked. When a queue is
2495 * locked, items can be added or removed, but the event lists cannot be updated. */
2497 taskENTER_CRITICAL();
2499 int8_t cTxLock = pxQueue->cTxLock;
2501 /* See if data was added to the queue while it was locked. */
2502 while( cTxLock > queueLOCKED_UNMODIFIED )
2504 /* Data was posted while the queue was locked. Are any tasks
2505 * blocked waiting for data to become available? */
2506 #if ( configUSE_QUEUE_SETS == 1 )
2508 if( pxQueue->pxQueueSetContainer != NULL )
2510 if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2512 /* The queue is a member of a queue set, and posting to
2513 * the queue set caused a higher priority task to unblock.
2514 * A context switch is required. */
2519 mtCOVERAGE_TEST_MARKER();
2524 /* Tasks that are removed from the event list will get
2525 * added to the pending ready list as the scheduler is still suspended. */
2527 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2529 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2531 /* The task waiting has a higher priority so record that a
2532 * context switch is required. */
2537 mtCOVERAGE_TEST_MARKER();
2546 #else /* configUSE_QUEUE_SETS */
2548 /* Tasks that are removed from the event list will get added to
2549 * the pending ready list as the scheduler is still suspended. */
2550 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2552 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2554 /* The task waiting has a higher priority so record that
2555 * a context switch is required. */
2560 mtCOVERAGE_TEST_MARKER();
2568 #endif /* configUSE_QUEUE_SETS */
2573 pxQueue->cTxLock = queueUNLOCKED;
2575 taskEXIT_CRITICAL();
2577 /* Do the same for the Rx lock. */
2578 taskENTER_CRITICAL();
2580 int8_t cRxLock = pxQueue->cRxLock;
2582 while( cRxLock > queueLOCKED_UNMODIFIED )
2584 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2586 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2592 mtCOVERAGE_TEST_MARKER();
2603 pxQueue->cRxLock = queueUNLOCKED;
2605 taskEXIT_CRITICAL();
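/* The blocking paths in this file use the lock/unlock pair in the
 * following pattern (kernel-internal sketch of code that appears above):
 *
 *     vTaskSuspendAll();
 *     prvLockQueue( pxQueue );
 *
 *     // Timeout checks and event list updates happen here. ISRs can
 *     // still add and remove items, but only bump cTxLock/cRxLock.
 *
 *     prvUnlockQueue( pxQueue ); // Replays the counted ISR activity.
 *     ( void ) xTaskResumeAll();
 */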
2607 /*-----------------------------------------------------------*/
2609 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2613 taskENTER_CRITICAL();
2615 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2624 taskEXIT_CRITICAL();
2628 /*-----------------------------------------------------------*/
2630 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2633 Queue_t * const pxQueue = xQueue;
2635 traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2637 configASSERT( pxQueue );
2639 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2648 traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2651 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2652 /*-----------------------------------------------------------*/
2654 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2658 taskENTER_CRITICAL();
2660 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2669 taskEXIT_CRITICAL();
2673 /*-----------------------------------------------------------*/
2675 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2678 Queue_t * const pxQueue = xQueue;
2680 traceENTER_xQueueIsQueueFullFromISR( xQueue );
2682 configASSERT( pxQueue );
2684 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2693 traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2696 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2697 /*-----------------------------------------------------------*/
2699 #if ( configUSE_CO_ROUTINES == 1 )
2701 BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2702 const void * pvItemToQueue,
2703 TickType_t xTicksToWait )
2706 Queue_t * const pxQueue = xQueue;
2708 traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2710 /* If the queue is already full we may have to block. A critical section
2711 * is required to prevent an interrupt removing something from the queue
2712 * between the check to see if the queue is full and blocking on the queue. */
2713 portDISABLE_INTERRUPTS();
2715 if( prvIsQueueFull( pxQueue ) != pdFALSE )
2717 /* The queue is full - do we want to block or just leave without posting? */
2719 if( xTicksToWait > ( TickType_t ) 0 )
2721 /* As this is called from a co-routine we cannot block directly, but
2722 * return indicating that we need to block. */
2723 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2724 portENABLE_INTERRUPTS();
2725 return errQUEUE_BLOCKED;
2729 portENABLE_INTERRUPTS();
2730 return errQUEUE_FULL;
2734 portENABLE_INTERRUPTS();
2736 portDISABLE_INTERRUPTS();
2738 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2740 /* There is room in the queue, copy the data into the queue. */
2741 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2744 /* Were any co-routines waiting for data to become available? */
2745 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2747 /* In this instance the co-routine could be placed directly
2748 * into the ready list as we are within a critical section.
2749 * Instead the same pending ready list mechanism is used as if
2750 * the event were caused from within an interrupt. */
2751 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2753 /* The co-routine waiting has a higher priority so record
2754 * that a yield might be appropriate. */
2755 xReturn = errQUEUE_YIELD;
2759 mtCOVERAGE_TEST_MARKER();
2764 mtCOVERAGE_TEST_MARKER();
2769 xReturn = errQUEUE_FULL;
2772 portENABLE_INTERRUPTS();
2774 traceRETURN_xQueueCRSend( xReturn );
2779 #endif /* configUSE_CO_ROUTINES */
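/* Example usage (illustrative sketch): application code does not call
 * xQueueCRSend() directly, but goes through the crQUEUE_SEND() macro from
 * within a co-routine. Names below are hypothetical:
 *
 *     void vPostingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *     {
 *         // Co-routines must use static locals to survive yields.
 *         static BaseType_t xResult;
 *         static uint32_t ulValueToPost = 0;
 *
 *         crSTART( xHandle );
 *
 *         for( ; ; )
 *         {
 *             crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, pdMS_TO_TICKS( 10 ), &xResult );
 *         }
 *
 *         crEND();
 *     }
 */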
2780 /*-----------------------------------------------------------*/
2782 #if ( configUSE_CO_ROUTINES == 1 )
2784 BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2786 TickType_t xTicksToWait )
2789 Queue_t * const pxQueue = xQueue;
2791 traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2793 /* If the queue is already empty we may have to block. A critical section
2794 * is required to prevent an interrupt adding something to the queue
2795 * between the check to see if the queue is empty and blocking on the queue. */
2796 portDISABLE_INTERRUPTS();
2798 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2800 /* There are no messages in the queue, do we want to block or just
2801 * leave with nothing? */
2802 if( xTicksToWait > ( TickType_t ) 0 )
2804 /* As this is a co-routine we cannot block directly, but return
2805 * indicating that we need to block. */
2806 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2807 portENABLE_INTERRUPTS();
2808 return errQUEUE_BLOCKED;
2812 portENABLE_INTERRUPTS();
2813 return errQUEUE_EMPTY;
2818 mtCOVERAGE_TEST_MARKER();
2821 portENABLE_INTERRUPTS();
2823 portDISABLE_INTERRUPTS();
2825 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2827 /* Data is available from the queue. */
2828 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2830 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2832 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2836 mtCOVERAGE_TEST_MARKER();
2839 --( pxQueue->uxMessagesWaiting );
2840 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2844 /* Were any co-routines waiting for space to become available? */
2845 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2847 /* In this instance the co-routine could be placed directly
2848 * into the ready list as we are within a critical section.
2849 * Instead the same pending ready list mechanism is used as if
2850 * the event were caused from within an interrupt. */
2851 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2853 xReturn = errQUEUE_YIELD;
2857 mtCOVERAGE_TEST_MARKER();
2862 mtCOVERAGE_TEST_MARKER();
2870 portENABLE_INTERRUPTS();
2872 traceRETURN_xQueueCRReceive( xReturn );
2877 #endif /* configUSE_CO_ROUTINES */
2878 /*-----------------------------------------------------------*/
2880 #if ( configUSE_CO_ROUTINES == 1 )
2882 BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2883 const void * pvItemToQueue,
2884 BaseType_t xCoRoutinePreviouslyWoken )
2886 Queue_t * const pxQueue = xQueue;
2888 traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2890 /* Cannot block within an ISR so if there is no space on the queue then
2891 * exit without doing anything. */
2892 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2894 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2896 /* We only want to wake one co-routine per ISR, so check that a
2897 * co-routine has not already been woken. */
2898 if( xCoRoutinePreviouslyWoken == pdFALSE )
2900 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2902 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2908 mtCOVERAGE_TEST_MARKER();
2913 mtCOVERAGE_TEST_MARKER();
2918 mtCOVERAGE_TEST_MARKER();
2923 mtCOVERAGE_TEST_MARKER();
2926 traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2928 return xCoRoutinePreviouslyWoken;
2931 #endif /* configUSE_CO_ROUTINES */
2932 /*-----------------------------------------------------------*/
2934 #if ( configUSE_CO_ROUTINES == 1 )
2936 BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2938 BaseType_t * pxCoRoutineWoken )
2941 Queue_t * const pxQueue = xQueue;
2943 traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2945 /* We cannot block from an ISR, so check there is data available. If
2946 * not then just leave without doing anything. */
2947 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2949 /* Copy the data from the queue. */
2950 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2952 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2954 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2958 mtCOVERAGE_TEST_MARKER();
2961 --( pxQueue->uxMessagesWaiting );
2962 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2964 if( ( *pxCoRoutineWoken ) == pdFALSE )
2966 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2968 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2970 *pxCoRoutineWoken = pdTRUE;
2974 mtCOVERAGE_TEST_MARKER();
2979 mtCOVERAGE_TEST_MARKER();
2984 mtCOVERAGE_TEST_MARKER();
2994 traceRETURN_xQueueCRReceiveFromISR( xReturn );
2999 #endif /* configUSE_CO_ROUTINES */
3000 /*-----------------------------------------------------------*/
3002 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3004 void vQueueAddToRegistry( QueueHandle_t xQueue,
3005 const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3008 QueueRegistryItem_t * pxEntryToWrite = NULL;
3010 traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3012 configASSERT( xQueue );
3014 if( pcQueueName != NULL )
3016 /* See if there is an empty space in the registry. A NULL name denotes a free slot. */
3018 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3020 /* Replace an existing entry if the queue is already in the registry. */
3021 if( xQueue == xQueueRegistry[ ux ].xHandle )
3023 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3026 /* Otherwise, store in the next empty location */
3027 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3029 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3033 mtCOVERAGE_TEST_MARKER();
3038 if( pxEntryToWrite != NULL )
3040 /* Store the information on this queue. */
3041 pxEntryToWrite->pcQueueName = pcQueueName;
3042 pxEntryToWrite->xHandle = xQueue;
3044 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3047 traceRETURN_vQueueAddToRegistry();
3050 #endif /* configQUEUE_REGISTRY_SIZE */
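/* Example usage (illustrative sketch): registering a queue makes it visible
 * by name in RTOS-aware debuggers. Only the pointer is stored, so the name
 * must remain valid - a string literal is typical:
 *
 *     QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 *     vQueueAddToRegistry( xQueue, "TxQueue" );
 *
 *     configASSERT( pcQueueGetName( xQueue ) != NULL );
 */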
3051 /*-----------------------------------------------------------*/
3053 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3055 const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3058 const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3060 traceENTER_pcQueueGetName( xQueue );
3062 configASSERT( xQueue );
3064 /* Note there is nothing here to protect against another task adding or
3065 * removing entries from the registry while it is being searched. */
3067 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3069 if( xQueueRegistry[ ux ].xHandle == xQueue )
3071 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3076 mtCOVERAGE_TEST_MARKER();
3080 traceRETURN_pcQueueGetName( pcReturn );
3083 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
3085 #endif /* configQUEUE_REGISTRY_SIZE */
3086 /*-----------------------------------------------------------*/
3088 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3090 void vQueueUnregisterQueue( QueueHandle_t xQueue )
3094 traceENTER_vQueueUnregisterQueue( xQueue );
3096 configASSERT( xQueue );
3098 /* See if the handle of the queue being unregistered is actually in the registry. */
3100 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3102 if( xQueueRegistry[ ux ].xHandle == xQueue )
3104 /* Set the name to NULL to show that this slot is free again. */
3105 xQueueRegistry[ ux ].pcQueueName = NULL;
3107 /* Set the handle to NULL to ensure the same queue handle cannot
3108 * appear in the registry twice if it is added, removed, then added again. */
3110 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3115 mtCOVERAGE_TEST_MARKER();
3119 traceRETURN_vQueueUnregisterQueue();
3120 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
3122 #endif /* configQUEUE_REGISTRY_SIZE */
3123 /*-----------------------------------------------------------*/
3125 #if ( configUSE_TIMERS == 1 )
3127 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3128 TickType_t xTicksToWait,
3129 const BaseType_t xWaitIndefinitely )
3131 Queue_t * const pxQueue = xQueue;
3133 traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3135 /* This function should not be called by application code hence the
3136 * 'Restricted' in its name. It is not part of the public API. It is
3137 * designed for use by kernel code, and has special calling requirements.
3138 * It can result in vListInsert() being called on a list that can only
3139 * possibly ever have one item in it, so the list will be fast, but even
3140 * so it should be called with the scheduler locked and not from a critical section. */
3143 /* Only do anything if there are no messages in the queue. This function
3144 * will not actually cause the task to block, just place it on a blocked
3145 * list. It will not block until the scheduler is unlocked - at which
3146 * time a yield will be performed. If an item is added to the queue while
3147 * the queue is locked, and the calling task blocks on the queue, then the
3148 * calling task will be immediately unblocked when the queue is unlocked. */
3149 prvLockQueue( pxQueue );
3151 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3153 /* There is nothing in the queue, block for the specified period. */
3154 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3158 mtCOVERAGE_TEST_MARKER();
3161 prvUnlockQueue( pxQueue );
3163 traceRETURN_vQueueWaitForMessageRestricted();
3166 #endif /* configUSE_TIMERS */
3167 /*-----------------------------------------------------------*/
3169 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3171 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3173 QueueSetHandle_t pxQueue;
3175 traceENTER_xQueueCreateSet( uxEventQueueLength );
3177 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3179 traceRETURN_xQueueCreateSet( pxQueue );
3184 #endif /* configUSE_QUEUE_SETS */
3185 /*-----------------------------------------------------------*/
3187 #if ( configUSE_QUEUE_SETS == 1 )
3189 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3190 QueueSetHandle_t xQueueSet )
3194 traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3196 taskENTER_CRITICAL();
3198 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3200 /* Cannot add a queue/semaphore to more than one queue set. */
3203 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3205 /* Cannot add a queue/semaphore to a queue set if there are already
3206 * items in the queue/semaphore. */
3211 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3215 taskEXIT_CRITICAL();
3217 traceRETURN_xQueueAddToSet( xReturn );
3222 #endif /* configUSE_QUEUE_SETS */
3223 /*-----------------------------------------------------------*/
3225 #if ( configUSE_QUEUE_SETS == 1 )
3227 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3228 QueueSetHandle_t xQueueSet )
3231 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3233 traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3235 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3237 /* The queue was not a member of the set. */
3240 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3242 /* It is dangerous to remove a queue from a set when the queue is
3243 * not empty because the queue set will still hold pending events for the queue. */
3249 taskENTER_CRITICAL();
3251 /* The queue is no longer contained in the set. */
3252 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3254 taskEXIT_CRITICAL();
3258 traceRETURN_xQueueRemoveFromSet( xReturn );
3261 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
3263 #endif /* configUSE_QUEUE_SETS */
3264 /*-----------------------------------------------------------*/
3266 #if ( configUSE_QUEUE_SETS == 1 )
3268 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3269 TickType_t const xTicksToWait )
3271 QueueSetMemberHandle_t xReturn = NULL;
3273 traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3275 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
3277 traceRETURN_xQueueSelectFromSet( xReturn );
3282 #endif /* configUSE_QUEUE_SETS */
3283 /*-----------------------------------------------------------*/
3285 #if ( configUSE_QUEUE_SETS == 1 )
3287 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3289 QueueSetMemberHandle_t xReturn = NULL;
3291 traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3293 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
3295 traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3300 #endif /* configUSE_QUEUE_SETS */
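/* Example usage (illustrative sketch): a queue set lets a single task block
 * on several queues and semaphores at once. The set length must be at least
 * the sum of the member lengths; the names below are hypothetical:
 *
 *     QueueSetHandle_t xSet = xQueueCreateSet( 10 + 1 );
 *     QueueSetMemberHandle_t xActivated;
 *
 *     // Members must be empty when they are added.
 *     ( void ) xQueueAddToSet( xQueue, xSet );     // Length 10 queue.
 *     ( void ) xQueueAddToSet( xSemaphore, xSet ); // Binary semaphore.
 *
 *     xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *
 *     if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
 *     {
 *         // Data is known to be available, so this cannot block.
 *         // ... xQueueReceive( xQueue, ... ) with a zero timeout ...
 *     }
 */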
3301 /*-----------------------------------------------------------*/
3303 #if ( configUSE_QUEUE_SETS == 1 )
3305 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3307 Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3308 BaseType_t xReturn = pdFALSE;
3310 /* This function must be called from a critical section. */
3312 /* The following line is not reachable in unit tests because every call
3313 * to prvNotifyQueueSetContainer is preceded by a check that
3314 * pxQueueSetContainer != NULL */
3315 configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3316 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3318 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3320 const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3322 traceQUEUE_SET_SEND( pxQueueSetContainer );
3324 /* The data copied is the handle of the queue that contains data. */
3325 xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3327 if( cTxLock == queueUNLOCKED )
3329 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3331 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3333 /* The task waiting has a higher priority. */
3338 mtCOVERAGE_TEST_MARKER();
3343 mtCOVERAGE_TEST_MARKER();
3348 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3353 mtCOVERAGE_TEST_MARKER();
3359 #endif /* configUSE_QUEUE_SETS */