/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */
/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )
/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area.  When the
 * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
 * not necessary, and the pcHead pointer is set to NULL to indicate that the
 * structure instead holds a pointer to the mutex holder (if any).  Map an
 * alternative name to the pcHead structure member to ensure the readability of
 * the code is maintained.  The QueuePointers_t and SemaphoreData_t types are
 * used to form a union as their usage is mutually exclusive dependent on what
 * the queue is actually being used for. */
#define uxQueueType            pcHead
#define queueQUEUE_IS_MUTEX    NULL
typedef struct QueuePointers
{
    int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, this is used as a marker. */
    int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;
typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #if ( configNUMBER_OF_CORES == 1 )
        #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* configUSE_PREEMPTION */
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    int8_t * pcHead;    /**< Points to the beginning of the queue storage area. */
    int8_t * pcWriteTo; /**< Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
    } u;

    List_t xTasksWaitingToSend;    /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
    UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /**< The size of each item that the queue will hold. */

    volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition * pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif
} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
 * name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
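/* Illustrative, application-side sketch of the queue-by-copy semantics
 * described above; it is NOT part of this kernel file.  It assumes an
 * application source file that includes FreeRTOS.h, task.h and queue.h, and
 * it is guarded out so it is never compiled here. */
#if 0 /* Example only. */
    typedef struct
    {
        uint32_t ulSensorId;
        uint32_t ulReading;
    } ExampleMessage_t;

    void vExampleProducer( QueueHandle_t xQueue )
    {
        ExampleMessage_t xMessage = { 1UL, 42UL };

        /* The queue stores a copy of xMessage, so the local variable can be
         * reused or go out of scope as soon as the call returns. */
        ( void ) xQueueSend( xQueue, &xMessage, pdMS_TO_TICKS( 10UL ) );
    }
#endif /* Example only. */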
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

/* The type stored within the queue registry array.  This allows a name
 * to be assigned to each queue making kernel aware debugging a little
 * more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
 * new QueueRegistryItem_t name below to enable the use of older kernel aware
 * debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
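/* Illustrative, application-side sketch of using the registry; not part of
 * this kernel file and guarded out.  vQueueAddToRegistry() and
 * pcQueueGetName() are the public API declared in queue.h. */
#if 0 /* Example only. */
    void vExampleRegisterQueue( QueueHandle_t xQueue )
    {
        /* Requires configQUEUE_REGISTRY_SIZE > 0.  The name is only used by
         * kernel aware debuggers; the string is not copied, so it must remain
         * valid while the queue is registered. */
        vQueueAddToRegistry( xQueue, "RxQueue" );
        configASSERT( pcQueueGetName( xQueue ) != NULL );
    }
#endif /* Example only. */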
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )

/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if ( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if ( configUSE_MUTEXES == 1 )

/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex.  This function returns
 * that priority.
 */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()

/*
 * Macro to increment cTxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )

/*
 * Macro to increment cRxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )
/*-----------------------------------------------------------*/
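/* A minimal sketch (not part of this file) of how the kernel itself uses the
 * lock counts defined above: a task brackets its blocking decision with
 * prvLockQueue()/prvUnlockQueue() so that ISRs posting or receiving in the
 * meantime only bump cTxLock/cRxLock instead of touching the event lists. */
#if 0 /* Example only - mirrors the pattern used by xQueueGenericSend(). */
    vTaskSuspendAll();
    prvLockQueue( pxQueue );

    /* ... decide whether to block; an ISR that runs now increments the lock
     * counts rather than waking tasks directly ... */

    prvUnlockQueue( pxQueue ); /* Replays any deferred wake-ups. */
    ( void ) xTaskResumeAll();
#endif /* Example only. */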
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                               BaseType_t xNewQueue )
{
    BaseType_t xReturn = pdPASS;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericReset( xQueue, xNewQueue );

    configASSERT( pxQueue );

    if( ( pxQueue != NULL ) &&
        ( pxQueue->uxLength >= 1U ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
    {
        taskENTER_CRITICAL();
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxQueue->pcWriteTo = pxQueue->pcHead;
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;

            if( xNewQueue == pdFALSE )
            {
                /* If there are tasks blocked waiting to read from the queue, then
                 * the tasks will remain blocked as after this function exits the queue
                 * will still be empty.  If there are tasks blocked waiting to write to
                 * the queue, then one should be unblocked as after this function exits
                 * it will be possible to write to it. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Ensure the event queues start in the correct state. */
                vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
                vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
            }
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        xReturn = pdFAIL;
    }

    configASSERT( xReturn != pdFAIL );

    /* A value is returned for calling semantic consistency with previous
     * versions. */
    traceRETURN_xQueueGenericReset( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
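/* Illustrative, application-side usage of the reset operation above; not
 * part of this file.  xQueueReset() is the queue.h wrapper that calls
 * xQueueGenericReset() with xNewQueue set to pdFALSE. */
#if 0 /* Example only. */
    void vExampleDrainQueue( QueueHandle_t xQueue )
    {
        /* Discard any queued items; a task blocked waiting to send is
         * unblocked because space becomes available. */
        ( void ) xQueueReset( xQueue );
    }
#endif /* Example only. */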
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
                                             const UBaseType_t uxItemSize,
                                             uint8_t * pucQueueStorage,
                                             StaticQueue_t * pxStaticQueue,
                                             const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;

        traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );

        /* The StaticQueue_t structure and the queue storage area must be
         * supplied. */
        configASSERT( pxStaticQueue );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            ( pxStaticQueue != NULL ) &&

            /* A queue storage area should be provided if the item size is not 0, and
             * should not be provided if the item size is 0. */
            ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ) &&
            ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ) )
        {
            #if ( configASSERT_DEFINED == 1 )
            {
                /* Sanity check that the size of the structure used to declare a
                 * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
                 * the real queue and semaphore structures. */
                volatile size_t xSize = sizeof( StaticQueue_t );

                /* This assertion cannot be branch covered in unit tests. */
                configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
                ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
            }
            #endif /* configASSERT_DEFINED */

            /* The address of a statically allocated queue was passed in, use it.
             * The address of a statically allocated storage area was also passed in
             * but is already set. */
            pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                 * note this queue was allocated statically in case the queue is
                 * later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreateStatic( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
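/* Illustrative, application-side sketch of static creation via the
 * xQueueCreateStatic() wrapper declared in queue.h; not part of this file. */
#if 0 /* Example only. */
    #define exQUEUE_LENGTH    5
    #define exITEM_SIZE       sizeof( uint32_t )

    static StaticQueue_t xExampleQueueBuffer;
    static uint8_t ucExampleStorage[ exQUEUE_LENGTH * exITEM_SIZE ];

    void vExampleCreateStaticQueue( void )
    {
        /* Both buffers are supplied by the application, so no heap is used
         * and creation cannot fail for lack of memory. */
        QueueHandle_t xQueue = xQueueCreateStatic( exQUEUE_LENGTH,
                                                   exITEM_SIZE,
                                                   ucExampleStorage,
                                                   &xExampleQueueBuffer );
        configASSERT( xQueue != NULL );
    }
#endif /* Example only. */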
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
                                              uint8_t ** ppucQueueStorage,
                                              StaticQueue_t ** ppxStaticQueue )
    {
        BaseType_t xReturn;
        Queue_t * const pxQueue = xQueue;

        traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );

        configASSERT( pxQueue );
        configASSERT( ppxStaticQueue );

        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        {
            /* Check if the queue was statically allocated. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
            {
                if( ppucQueueStorage != NULL )
                {
                    *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
                }

                *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* configSUPPORT_DYNAMIC_ALLOCATION */
        {
            /* Queue must have been statically allocated. */
            if( ppucQueueStorage != NULL )
            {
                *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
            }

            *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
            xReturn = pdTRUE;
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        traceRETURN_xQueueGenericGetStaticBuffers( xReturn );

        return xReturn;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
                                       const UBaseType_t uxItemSize,
                                       const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;
        size_t xQueueSizeInBytes;
        uint8_t * pucQueueStorage;

        traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            /* Check for multiplication overflow. */
            ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
            /* Check for addition overflow. */
            ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
        {
            /* Allocate enough space to hold the maximum number of items that
             * can be in the queue at any time.  It is valid for uxItemSize to be
             * zero in the case the queue is used as a semaphore. */
            xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

            /* Allocate the queue and storage area.  Justification for MISRA
             * deviation as follows:  pvPortMalloc() always ensures returned memory
             * blocks are aligned per the requirements of the MCU stack.  In this case
             * pvPortMalloc() must return a pointer that is guaranteed to meet the
             * alignment requirements of the Queue_t structure - which in this case
             * is an int8_t *.  Therefore, whenever the stack alignment requirements
             * are greater than or equal to the pointer to char requirements the cast
             * is safe.  In other cases alignment requirements are not strict (one or
             * two bytes). */
            pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

            if( pxNewQueue != NULL )
            {
                /* Jump past the queue structure to find the location of the queue
                 * storage area. */
                pucQueueStorage = ( uint8_t * ) pxNewQueue;
                pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

                #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                {
                    /* Queues can be created either statically or dynamically, so
                     * note this queue was created dynamically in case it is later
                     * deleted. */
                    pxNewQueue->ucStaticallyAllocated = pdFALSE;
                }
                #endif /* configSUPPORT_STATIC_ALLOCATION */

                prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
            }
            else
            {
                traceQUEUE_CREATE_FAILED( ucQueueType );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreate( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
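/* Illustrative, application-side sketch of dynamic creation via the
 * xQueueCreate() wrapper declared in queue.h; not part of this file. */
#if 0 /* Example only. */
    void vExampleCreateQueue( void )
    {
        /* Room for ten uint32_t items, allocated from the FreeRTOS heap in a
         * single block (Queue_t header plus storage), as implemented above. */
        QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );

        if( xQueue == NULL )
        {
            /* Allocation failed - handle the error. */
        }
    }
#endif /* Example only. */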
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
     * configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
         * be set to NULL because NULL is used as a key to say the queue is used as
         * a mutex.  Therefore just set pcHead to point to the queue as a benign
         * value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
     * defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if ( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t * pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
             * correctly for a generic queue, but this function is creating a
             * mutex.  Overwrite those members that need to be set differently -
             * in particular the information required for priority inheritance. */
            pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutex( ucQueueType );

        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutex( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
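/* Illustrative, application-side sketch using the semphr.h wrappers that sit
 * on top of xQueueCreateMutex(); not part of this file and guarded out. */
#if 0 /* Example only. */
    void vExampleUseMutex( void )
    {
        SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();

        if( xMutex != NULL )
        {
            if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100UL ) ) == pdPASS )
            {
                /* ... access the shared resource; priority inheritance is
                 * applied if a higher priority task contends meanwhile ... */
                ( void ) xSemaphoreGive( xMutex );
            }
        }
    }
#endif /* Example only. */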
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
                                           StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );

        /* Prevent compiler warnings about unused parameters if
         * configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutexStatic( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;
        Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

        traceENTER_xQueueGetMutexHolder( xSemaphore );

        configASSERT( xSemaphore );

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
         * be called directly.  Note:  This is a good way of determining if the
         * calling task is the mutex holder, but not a good way of determining the
         * identity of the mutex holder, as the holder may change between the
         * following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xQueueGetMutexHolder( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;

        traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
         * holder should not change in an ISR, and therefore a critical section is
         * not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueGiveMutexRecursive( xMutex );

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then xMutexHolder will not
         * change outside of this task.  If this task does not hold the mutex then
         * pxMutexHolder can never coincidentally equal the tasks handle, and as
         * this is the only condition we are interested in it does not matter if
         * pxMutexHolder is accessed simultaneously by another task.  Therefore no
         * mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
             * the task handle, therefore no underflow check is required.  Also,
             * uxRecursiveCallCount is only modified by the mutex holder, and as
             * there can only be one, no mutual exclusion is required to modify the
             * uxRecursiveCallCount member. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                 * task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
             * holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        traceRETURN_xQueueGiveMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
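/* Illustrative, application-side sketch of the recursive take/give pairing
 * implemented above, using the semphr.h wrappers; not part of this file. */
#if 0 /* Example only. */
    void vExampleRecursiveMutex( SemaphoreHandle_t xRecursiveMutex )
    {
        /* Each successful take must be balanced by a give; the mutex is only
         * released to other tasks when the call count unwinds to zero. */
        if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdPASS )
        {
            if( xSemaphoreTakeRecursive( xRecursiveMutex, 0 ) == pdPASS )
            {
                ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
            }

            ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
        }
    }
#endif /* Example only. */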
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                         TickType_t xTicksToWait )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
         * xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
             * obtained.  The calling task may have entered the Blocked state
             * before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        traceRETURN_xQueueTakeMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
                                                       const UBaseType_t uxInitialCount,
                                                       StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );

        if( ( uxMaxCount != 0 ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                                 const UBaseType_t uxInitialCount )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );

        if( ( uxMaxCount != 0 ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphore( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
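/* Illustrative, application-side sketch of a counting semaphore created via
 * the xSemaphoreCreateCounting() wrapper over the function above; not part
 * of this file. */
#if 0 /* Example only. */
    void vExampleCountingSemaphore( void )
    {
        /* A maximum count of 3 with an initial count of 0 models three
         * resource slots that start out unavailable; the count lives in
         * uxMessagesWaiting as described above. */
        SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 3, 0 );

        if( xCounting != NULL )
        {
            ( void ) xSemaphoreGive( xCounting );                /* Count 0 -> 1. */
            ( void ) xSemaphoreTake( xCounting, portMAX_DELAY ); /* Count 1 -> 0. */
        }
    }
#endif /* Example only. */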
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to
                                 * do this from within the critical section - the
                                 * kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes
                             * and the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                     * queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                             * our own so yield immediately.  Yes it is ok to do
                             * this from within the critical section - the kernel
                             * takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                         * executed if the task was holding multiple mutexes and
                         * the mutexes were given back in an order that is
                         * different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                traceRETURN_xQueueGenericSend( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    traceRETURN_xQueueGenericSend( errQUEUE_FULL );

                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list.  It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        portYIELD_WITHIN_API();
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        vTaskYieldWithinAPI();
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            traceRETURN_xQueueGenericSend( errQUEUE_FULL );

            return errQUEUE_FULL;
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
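/* Illustrative, application-side sketch of the blocking send path above via
 * the xQueueSend() wrapper; not part of this file. */
#if 0 /* Example only. */
    void vExampleSend( QueueHandle_t xQueue )
    {
        uint32_t ulValue = 123UL;

        /* Block for up to 50 ms if the queue is full; errQUEUE_FULL is
         * returned if space never becomes available in that time. */
        if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 50UL ) ) != pdPASS )
        {
            /* Timed out - the item was not posted. */
        }
    }
#endif /* Example only. */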
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue.  Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex.  That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    ( void ) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGenericSendFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
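/* Illustrative ISR-side sketch of the function above via the
 * xQueueSendFromISR() wrapper; not part of this file.  The final yield macro
 * is port specific - portYIELD_FROM_ISR() is the common form, but consult
 * the port in use. */
#if 0 /* Example only. */
    static QueueHandle_t xExampleQueue; /* Created elsewhere. */

    void vExampleISR( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint32_t ulValue = 123UL;

        /* Never blocks; fails with errQUEUE_FULL if the queue is full. */
        ( void ) xQueueSendFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken );

        /* Request a context switch before exiting the ISR if a higher
         * priority task was unblocked by the post. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif /* Example only. */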
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0.  Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupt, only for tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR.  As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed.  Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGiveFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
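/* Illustrative ISR-side sketch of giving a semaphore with the
 * xSemaphoreGiveFromISR() wrapper that resolves to the function above; not
 * part of this file.  portYIELD_FROM_ISR() is the common (port specific)
 * yield form. */
#if 0 /* Example only. */
    static SemaphoreHandle_t xExampleBinary; /* Created with xSemaphoreCreateBinary(). */

    void vExampleDeferredInterruptHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Signal a task to perform the deferred processing. */
        ( void ) xSemaphoreGiveFromISR( xExampleBinary, &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif /* Example only. */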
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue?  If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                traceRETURN_xQueueReceive( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );

                if( xTaskResumeAll() == pdFALSE )
                {
                    #if ( configNUMBER_OF_CORES == 1 )
                    {
                        portYIELD_WITHIN_API();
                    }
                    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                    {
                        vTaskYieldWithinAPI();
                    }
                    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
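/* Illustrative, application-side sketch of the blocking receive path above;
 * not part of this file. */
#if 0 /* Example only. */
    void vExampleConsumerTask( void * pvParameters )
    {
        QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
        uint32_t ulReceived;

        for( ; ; )
        {
            /* Block indefinitely until an item arrives; the item is copied
             * into ulReceived and removed from the queue. */
            if( xQueueReceive( xQueue, &ulReceived, portMAX_DELAY ) == pdPASS )
            {
                /* ... process ulReceived ... */
            }
        }
    }
#endif /* Example only. */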
1665 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1666 TickType_t xTicksToWait )
1668 BaseType_t xEntryTimeSet = pdFALSE;
1670 Queue_t * const pxQueue = xQueue;
1672 #if ( configUSE_MUTEXES == 1 )
1673 BaseType_t xInheritanceOccurred = pdFALSE;
1676 traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1678 /* Check the queue pointer is not NULL. */
1679 configASSERT( ( pxQueue ) );
1681 /* Check this really is a semaphore, in which case the item size will be
1683 configASSERT( pxQueue->uxItemSize == 0 );
1685 /* Cannot block if the scheduler is suspended. */
1686 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1688 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1692 /*lint -save -e904 This function relaxes the coding standard somewhat to allow return
1693 * statements within the function itself. This is done in the interest
1694 * of execution time efficiency. */
1697 taskENTER_CRITICAL();
1699 /* Semaphores are queues with an item size of 0, and where the
1700 * number of messages in the queue is the semaphore's count value. */
1701 const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1703 /* Is there data in the queue now? To be running the calling task
1704 * must be the highest priority task wanting to access the queue. */
1705 if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1707 traceQUEUE_RECEIVE( pxQueue );
1709 /* Semaphores are queues with a data size of zero and where the
1710 * messages waiting is the semaphore's count. Reduce the count. */
1711 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1713 #if ( configUSE_MUTEXES == 1 )
1715 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1717 /* Record the information required to implement
1718 * priority inheritance should it become necessary. */
1719 pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1723 mtCOVERAGE_TEST_MARKER();
1726 #endif /* configUSE_MUTEXES */
1728 /* Check to see if other tasks are blocked waiting to give the
1729 * semaphore, and if so, unblock the highest priority such task. */
1730 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1732 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1734 queueYIELD_IF_USING_PREEMPTION();
1738 mtCOVERAGE_TEST_MARKER();
1743 mtCOVERAGE_TEST_MARKER();
1746 taskEXIT_CRITICAL();
1748 traceRETURN_xQueueSemaphoreTake( pdPASS );
1754 if( xTicksToWait == ( TickType_t ) 0 )
1756 /* The semaphore count was 0 and no block time is specified
1757 * (or the block time has expired) so exit now. */
1758 taskEXIT_CRITICAL();
1760 traceQUEUE_RECEIVE_FAILED( pxQueue );
1761 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1763 return errQUEUE_EMPTY;
1765 else if( xEntryTimeSet == pdFALSE )
1767 /* The semaphore count was 0 and a block time was specified
1768 * so configure the timeout structure ready to block. */
1769 vTaskInternalSetTimeOutState( &xTimeOut );
1770 xEntryTimeSet = pdTRUE;
1774 /* Entry time was already set. */
1775 mtCOVERAGE_TEST_MARKER();
1779 taskEXIT_CRITICAL();
1781 /* Interrupts and other tasks can give to and take from the semaphore
1782 * now the critical section has been exited. */
1785 prvLockQueue( pxQueue );
1787 /* Update the timeout state to see if it has expired yet. */
1788 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1790 /* A block time is specified and not expired. If the semaphore
1791 * count is 0 then enter the Blocked state to wait for a semaphore to
1792 * become available. As semaphores are implemented with queues the
1793 * queue being empty is equivalent to the semaphore count being 0. */
1794 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1796 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1798 #if ( configUSE_MUTEXES == 1 )
1800 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1802 taskENTER_CRITICAL();
1804 xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1806 taskEXIT_CRITICAL();
1810 mtCOVERAGE_TEST_MARKER();
1813 #endif /* if ( configUSE_MUTEXES == 1 ) */
1815 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1816 prvUnlockQueue( pxQueue );
1818 if( xTaskResumeAll() == pdFALSE )
1820 #if ( configNUMBER_OF_CORES == 1 )
1822 portYIELD_WITHIN_API();
1824 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
1826 vTaskYieldWithinAPI();
1828 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
1832 mtCOVERAGE_TEST_MARKER();
1837 /* There was no timeout and the semaphore count was not 0, so
1838 * attempt to take the semaphore again. */
1839 prvUnlockQueue( pxQueue );
1840 ( void ) xTaskResumeAll();
1846 prvUnlockQueue( pxQueue );
1847 ( void ) xTaskResumeAll();
1849 /* If the semaphore count is 0 exit now as the timeout has
1850 * expired. Otherwise return to attempt to take the semaphore that is
1851 * known to be available. As semaphores are implemented by queues the
1852 * queue being empty is equivalent to the semaphore count being 0. */
1853 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1855 #if ( configUSE_MUTEXES == 1 )
1857 /* xInheritanceOccurred could only have been set if
1858 * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1859 * test the mutex type again to check it is actually a mutex. */
1860 if( xInheritanceOccurred != pdFALSE )
1862 taskENTER_CRITICAL();
1864 UBaseType_t uxHighestWaitingPriority;
1866 /* This task blocking on the mutex caused another
1867 * task to inherit this task's priority. Now this task
1868 * has timed out the priority should be disinherited
1869 * again, but only as low as the next highest priority
1870 * task that is waiting for the same mutex. */
1871 uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1872 vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1874 taskEXIT_CRITICAL();
1877 #endif /* configUSE_MUTEXES */
1879 traceQUEUE_RECEIVE_FAILED( pxQueue );
1880 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1882 return errQUEUE_EMPTY;
1886 mtCOVERAGE_TEST_MARKER();
1889 } /*lint -restore */
1891 /*-----------------------------------------------------------*/
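/* Illustrative application-side sketch, not kernel code: the public
 * xSemaphoreTake() macro resolves to xQueueSemaphoreTake() above. Handle
 * and timeout values below are hypothetical.
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *     if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
 *     {
 *         // The holder field recorded above now identifies this task.
 *         // ...access the protected resource...
 *         xSemaphoreGive( xMutex );
 *     }
 *     else
 *     {
 *         // Timed out: any priority inherited on this task's behalf has
 *         // been wound back by the disinherit path above.
 *     }
 */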
1893 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1894 void * const pvBuffer,
1895 TickType_t xTicksToWait )
1897 BaseType_t xEntryTimeSet = pdFALSE;
1899 int8_t * pcOriginalReadPosition;
1900 Queue_t * const pxQueue = xQueue;
1902 traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1904 /* Check the pointer is not NULL. */
1905 configASSERT( ( pxQueue ) );
1907 /* The buffer into which data is received can only be NULL if the data size
1908 * is zero (so no data is copied into the buffer). */
1909 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1911 /* Cannot block if the scheduler is suspended. */
1912 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1914 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1918 /*lint -save -e904 This function relaxes the coding standard somewhat to
1919 * allow return statements within the function itself. This is done in the
1920 * interest of execution time efficiency. */
1923 taskENTER_CRITICAL();
1925 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1927 /* Is there data in the queue now? To be running, the calling task
1928 * must be the highest priority task wanting to access the queue. */
1929 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1931 /* Remember the read position so it can be reset after the data
1932 * is read from the queue as this function is only peeking the
1933 * data, not removing it. */
1934 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1936 prvCopyDataFromQueue( pxQueue, pvBuffer );
1937 traceQUEUE_PEEK( pxQueue );
1939 /* The data is not being removed, so reset the read pointer. */
1940 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1942 /* The data is being left in the queue, so see if there are
1943 * any other tasks waiting for the data. */
1944 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1946 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1948 /* The task waiting has a higher priority than this task. */
1949 queueYIELD_IF_USING_PREEMPTION();
1953 mtCOVERAGE_TEST_MARKER();
1958 mtCOVERAGE_TEST_MARKER();
1961 taskEXIT_CRITICAL();
1963 traceRETURN_xQueuePeek( pdPASS );
1969 if( xTicksToWait == ( TickType_t ) 0 )
1971 /* The queue was empty and no block time is specified (or
1972 * the block time has expired) so leave now. */
1973 taskEXIT_CRITICAL();
1975 traceQUEUE_PEEK_FAILED( pxQueue );
1976 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1978 return errQUEUE_EMPTY;
1980 else if( xEntryTimeSet == pdFALSE )
1982 /* The queue was empty and a block time was specified so
1983 * configure the timeout structure ready to enter the blocked state. */
1985 vTaskInternalSetTimeOutState( &xTimeOut );
1986 xEntryTimeSet = pdTRUE;
1990 /* Entry time was already set. */
1991 mtCOVERAGE_TEST_MARKER();
1995 taskEXIT_CRITICAL();
1997 /* Interrupts and other tasks can send to and receive from the queue
1998 * now that the critical section has been exited. */
2001 prvLockQueue( pxQueue );
2003 /* Update the timeout state to see if it has expired yet. */
2004 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
2006 /* Timeout has not expired yet, check to see if there is data in the
2007 * queue now, and if not enter the Blocked state to wait for data. */
2008 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2010 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
2011 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
2012 prvUnlockQueue( pxQueue );
2014 if( xTaskResumeAll() == pdFALSE )
2016 #if ( configNUMBER_OF_CORES == 1 )
2018 portYIELD_WITHIN_API();
2020 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
2022 vTaskYieldWithinAPI();
2024 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
2028 mtCOVERAGE_TEST_MARKER();
2033 /* There is data in the queue now, so don't enter the blocked
2034 * state, instead return to try and obtain the data. */
2035 prvUnlockQueue( pxQueue );
2036 ( void ) xTaskResumeAll();
2041 /* The timeout has expired. If there is still no data in the queue
2042 * exit, otherwise go back and try to read the data again. */
2043 prvUnlockQueue( pxQueue );
2044 ( void ) xTaskResumeAll();
2046 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2048 traceQUEUE_PEEK_FAILED( pxQueue );
2049 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2051 return errQUEUE_EMPTY;
2055 mtCOVERAGE_TEST_MARKER();
2058 } /*lint -restore */
2060 /*-----------------------------------------------------------*/
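/* Illustrative application-side sketch, not kernel code: a peek copies the
 * item but leaves it in the queue, so a later xQueueReceive() returns the
 * same data. Names are hypothetical.
 *
 *     uint32_t ulValue;
 *
 *     if( xQueuePeek( xQueue, &ulValue, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *     {
 *         // ulValue holds a copy; the item is still at the queue head, so
 *         // xQueueReceive( xQueue, &ulValue, 0 ) would return it again.
 *     }
 */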
2062 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2063 void * const pvBuffer,
2064 BaseType_t * const pxHigherPriorityTaskWoken )
2067 UBaseType_t uxSavedInterruptStatus;
2068 Queue_t * const pxQueue = xQueue;
2070 traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2072 configASSERT( pxQueue );
2073 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2075 /* RTOS ports that support interrupt nesting have the concept of a maximum
2076 * system call (or maximum API call) interrupt priority. Interrupts that are
2077 * above the maximum system call priority are kept permanently enabled, even
2078 * when the RTOS kernel is in a critical section, but cannot make any calls to
2079 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2080 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2081 * failure if a FreeRTOS API function is called from an interrupt that has been
2082 * assigned a priority above the configured maximum system call priority.
2083 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2084 * that have been assigned a priority at or (logically) below the maximum
2085 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2086 * safe API to ensure interrupt entry is as fast and as simple as possible.
2087 * More information (albeit Cortex-M specific) is provided on the following
2088 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2089 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2091 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2093 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2095 /* Cannot block in an ISR, so check there is data available. */
2096 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2098 const int8_t cRxLock = pxQueue->cRxLock;
2100 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2102 prvCopyDataFromQueue( pxQueue, pvBuffer );
2103 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2105 /* If the queue is locked the event list will not be modified.
2106 * Instead update the lock count so the task that unlocks the queue
2107 * will know that an ISR has removed data while the queue was locked. */
2109 if( cRxLock == queueUNLOCKED )
2111 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2113 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2115 /* The task waiting has a higher priority than us so
2116 * force a context switch. */
2117 if( pxHigherPriorityTaskWoken != NULL )
2119 *pxHigherPriorityTaskWoken = pdTRUE;
2123 mtCOVERAGE_TEST_MARKER();
2128 mtCOVERAGE_TEST_MARKER();
2133 mtCOVERAGE_TEST_MARKER();
2138 /* Increment the lock count so the task that unlocks the queue
2139 * knows that data was removed while it was locked. */
2140 prvIncrementQueueRxLock( pxQueue, cRxLock );
2148 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2151 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2153 traceRETURN_xQueueReceiveFromISR( xReturn );
2157 /*-----------------------------------------------------------*/
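/* Illustrative ISR-side sketch, not kernel code: the handler name is
 * hypothetical and the yield macro is port-specific (shown here in the
 * common portYIELD_FROM_ISR() form).
 *
 *     void vExampleISR( void )
 *     {
 *         uint32_t ulValue;
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *         while( xQueueReceiveFromISR( xQueue, &ulValue, &xHigherPriorityTaskWoken ) == pdPASS )
 *         {
 *             // Process ulValue.
 *         }
 *
 *         // Request a context switch if a higher priority task unblocked.
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */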
2159 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2160 void * const pvBuffer )
2163 UBaseType_t uxSavedInterruptStatus;
2164 int8_t * pcOriginalReadPosition;
2165 Queue_t * const pxQueue = xQueue;
2167 traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2169 configASSERT( pxQueue );
2170 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2171 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2173 /* RTOS ports that support interrupt nesting have the concept of a maximum
2174 * system call (or maximum API call) interrupt priority. Interrupts that are
2175 * above the maximum system call priority are kept permanently enabled, even
2176 * when the RTOS kernel is in a critical section, but cannot make any calls to
2177 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2178 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2179 * failure if a FreeRTOS API function is called from an interrupt that has been
2180 * assigned a priority above the configured maximum system call priority.
2181 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2182 * that have been assigned a priority at or (logically) below the maximum
2183 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2184 * safe API to ensure interrupt entry is as fast and as simple as possible.
2185 * More information (albeit Cortex-M specific) is provided on the following
2186 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2187 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2189 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2191 /* Cannot block in an ISR, so check there is data available. */
2192 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2194 traceQUEUE_PEEK_FROM_ISR( pxQueue );
2196 /* Remember the read position so it can be reset as nothing is
2197 * actually being removed from the queue. */
2198 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2199 prvCopyDataFromQueue( pxQueue, pvBuffer );
2200 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2207 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2210 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2212 traceRETURN_xQueuePeekFromISR( xReturn );
2216 /*-----------------------------------------------------------*/
2218 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2220 UBaseType_t uxReturn;
2222 traceENTER_uxQueueMessagesWaiting( xQueue );
2224 configASSERT( xQueue );
2226 taskENTER_CRITICAL();
2228 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2230 taskEXIT_CRITICAL();
2232 traceRETURN_uxQueueMessagesWaiting( uxReturn );
2235 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2236 /*-----------------------------------------------------------*/
2238 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2240 UBaseType_t uxReturn;
2241 Queue_t * const pxQueue = xQueue;
2243 traceENTER_uxQueueSpacesAvailable( xQueue );
2245 configASSERT( pxQueue );
2247 taskENTER_CRITICAL();
2249 uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2251 taskEXIT_CRITICAL();
2253 traceRETURN_uxQueueSpacesAvailable( uxReturn );
2256 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2257 /*-----------------------------------------------------------*/
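/* Illustrative sketch, not kernel code: the two query functions above
 * partition the queue's length, although each call takes its own critical
 * section, so the sum is only meaningful while nothing else is using the
 * queue. uxLength below is hypothetical.
 *
 *     configASSERT( ( uxQueueMessagesWaiting( xQueue ) +
 *                     uxQueueSpacesAvailable( xQueue ) ) == uxLength );
 */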
2259 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2261 UBaseType_t uxReturn;
2262 Queue_t * const pxQueue = xQueue;
2264 traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2266 configASSERT( pxQueue );
2267 uxReturn = pxQueue->uxMessagesWaiting;
2269 traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2272 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2273 /*-----------------------------------------------------------*/
2275 void vQueueDelete( QueueHandle_t xQueue )
2277 Queue_t * const pxQueue = xQueue;
2279 traceENTER_vQueueDelete( xQueue );
2281 configASSERT( pxQueue );
2282 traceQUEUE_DELETE( pxQueue );
2284 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2286 vQueueUnregisterQueue( pxQueue );
2290 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2292 /* The queue can only have been allocated dynamically, so free it. */
2294 vPortFree( pxQueue );
2296 #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2298 /* The queue could have been allocated statically or dynamically, so
2299 * check before attempting to free the memory. */
2300 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2302 vPortFree( pxQueue );
2306 mtCOVERAGE_TEST_MARKER();
2309 #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2311 /* The queue must have been statically allocated, so is not going to be
2312 * deleted. Avoid compiler warnings about the unused parameter. */
2313 ( void ) pxQueue;
2315 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2317 traceRETURN_vQueueDelete();
2319 /*-----------------------------------------------------------*/
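/* Illustrative application-side sketch, not kernel code: deleting a queue
 * does not wake tasks blocked on it, so only delete a queue once nothing
 * can be waiting to send to or receive from it. The NULL assignment is a
 * hypothetical guard against dangling use.
 *
 *     vQueueDelete( xQueue );
 *     xQueue = NULL;
 */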
2321 #if ( configUSE_TRACE_FACILITY == 1 )
2323 UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2325 traceENTER_uxQueueGetQueueNumber( xQueue );
2327 traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2329 return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2332 #endif /* configUSE_TRACE_FACILITY */
2333 /*-----------------------------------------------------------*/
2335 #if ( configUSE_TRACE_FACILITY == 1 )
2337 void vQueueSetQueueNumber( QueueHandle_t xQueue,
2338 UBaseType_t uxQueueNumber )
2340 traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2342 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2344 traceRETURN_vQueueSetQueueNumber();
2347 #endif /* configUSE_TRACE_FACILITY */
2348 /*-----------------------------------------------------------*/
2350 #if ( configUSE_TRACE_FACILITY == 1 )
2352 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2354 traceENTER_ucQueueGetQueueType( xQueue );
2356 traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2358 return ( ( Queue_t * ) xQueue )->ucQueueType;
2361 #endif /* configUSE_TRACE_FACILITY */
2362 /*-----------------------------------------------------------*/
2364 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2366 traceENTER_uxQueueGetQueueItemSize( xQueue );
2368 traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2370 return ( ( Queue_t * ) xQueue )->uxItemSize;
2372 /*-----------------------------------------------------------*/
2374 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2376 traceENTER_uxQueueGetQueueLength( xQueue );
2378 traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2380 return ( ( Queue_t * ) xQueue )->uxLength;
2382 /*-----------------------------------------------------------*/
2384 #if ( configUSE_MUTEXES == 1 )
2386 static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2388 UBaseType_t uxHighestPriorityOfWaitingTasks;
2390 /* If a task waiting for a mutex causes the mutex holder to inherit a
2391 * priority, but the waiting task times out, then the holder should
2392 * disinherit the priority - but only down to the highest priority of any
2393 * other tasks that are waiting for the same mutex. For this purpose,
2394 * return the priority of the highest priority task that is waiting for the mutex. */
2396 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2398 uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2402 uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2405 return uxHighestPriorityOfWaitingTasks;
2408 #endif /* configUSE_MUTEXES */
2409 /*-----------------------------------------------------------*/
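/* Worked example of the calculation above, with assumed numbers: event
 * list items are keyed on configMAX_PRIORITIES minus the waiting task's
 * priority, so the list head is always the highest priority waiter. With
 * configMAX_PRIORITIES = 5 and a highest priority waiter of priority 3,
 * the stored head item value is 5 - 3 = 2, and the subtraction above
 * recovers 5 - 2 = 3 as the floor to disinherit down to. */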
2411 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2412 const void * pvItemToQueue,
2413 const BaseType_t xPosition )
2415 BaseType_t xReturn = pdFALSE;
2416 UBaseType_t uxMessagesWaiting;
2418 /* This function is called from a critical section. */
2420 uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2422 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2424 #if ( configUSE_MUTEXES == 1 )
2426 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2428 /* The mutex is no longer being held. */
2429 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2430 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2434 mtCOVERAGE_TEST_MARKER();
2437 #endif /* configUSE_MUTEXES */
2439 else if( xPosition == queueSEND_TO_BACK )
2441 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2442 pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2444 if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2446 pxQueue->pcWriteTo = pxQueue->pcHead;
2450 mtCOVERAGE_TEST_MARKER();
2455 ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
2456 pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2458 if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2460 pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2464 mtCOVERAGE_TEST_MARKER();
2467 if( xPosition == queueOVERWRITE )
2469 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2471 /* An item is not being added but overwritten, so subtract
2472 * one from the recorded number of items in the queue so when
2473 * one is added again below the number of recorded items remains correct. */
2475 --uxMessagesWaiting;
2479 mtCOVERAGE_TEST_MARKER();
2484 mtCOVERAGE_TEST_MARKER();
2488 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
2492 /*-----------------------------------------------------------*/
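/* Illustrative application-side sketch, not kernel code: the
 * queueOVERWRITE path above backs xQueueOverwrite(), which is intended for
 * length-1 "mailbox" queues where only the most recent value matters.
 * Names are hypothetical.
 *
 *     QueueHandle_t xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
 *     uint32_t ulLatest = 10U;
 *
 *     // Never blocks and always succeeds on a length-1 queue - the
 *     // previous value, if any, is replaced.
 *     ( void ) xQueueOverwrite( xMailbox, &ulLatest );
 */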
2494 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2495 void * const pvBuffer )
2497 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2499 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2501 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
2503 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2507 mtCOVERAGE_TEST_MARKER();
2510 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2513 /*-----------------------------------------------------------*/
2515 static void prvUnlockQueue( Queue_t * const pxQueue )
2517 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2519 /* The lock counts contain the number of extra data items placed in or
2520 * removed from the queue while the queue was locked. When a queue is
2521 * locked, items can be added or removed, but the event lists cannot be
2522 * updated. */
2523 taskENTER_CRITICAL();
2525 int8_t cTxLock = pxQueue->cTxLock;
2527 /* See if data was added to the queue while it was locked. */
2528 while( cTxLock > queueLOCKED_UNMODIFIED )
2530 /* Data was posted while the queue was locked. Are any tasks
2531 * blocked waiting for data to become available? */
2532 #if ( configUSE_QUEUE_SETS == 1 )
2534 if( pxQueue->pxQueueSetContainer != NULL )
2536 if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2538 /* The queue is a member of a queue set, and posting to
2539 * the queue set caused a higher priority task to unblock.
2540 * A context switch is required. */
2545 mtCOVERAGE_TEST_MARKER();
2550 /* Tasks that are removed from the event list will get
2551 * added to the pending ready list as the scheduler is still suspended. */
2553 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2555 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2557 /* The task waiting has a higher priority so record that a
2558 * context switch is required. */
2563 mtCOVERAGE_TEST_MARKER();
2572 #else /* configUSE_QUEUE_SETS */
2574 /* Tasks that are removed from the event list will get added to
2575 * the pending ready list as the scheduler is still suspended. */
2576 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2578 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2580 /* The task waiting has a higher priority so record that
2581 * a context switch is required. */
2586 mtCOVERAGE_TEST_MARKER();
2594 #endif /* configUSE_QUEUE_SETS */
2599 pxQueue->cTxLock = queueUNLOCKED;
2601 taskEXIT_CRITICAL();
2603 /* Do the same for the Rx lock. */
2604 taskENTER_CRITICAL();
2606 int8_t cRxLock = pxQueue->cRxLock;
2608 while( cRxLock > queueLOCKED_UNMODIFIED )
2610 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2612 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2618 mtCOVERAGE_TEST_MARKER();
2629 pxQueue->cRxLock = queueUNLOCKED;
2631 taskEXIT_CRITICAL();
2633 /*-----------------------------------------------------------*/
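/* Worked sequence for the lock counts handled above, with assumed
 * numbers: a task calls prvLockQueue() (both counts move from
 * queueUNLOCKED to queueLOCKED_UNMODIFIED), then an ISR sends twice while
 * the queue is locked, so cTxLock is incremented to 2 instead of the
 * event lists being touched from the ISR. When prvUnlockQueue() runs it
 * replays those two events, unblocking at most two waiting receivers,
 * before restoring both counts to queueUNLOCKED. */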
2635 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2639 taskENTER_CRITICAL();
2641 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2650 taskEXIT_CRITICAL();
2654 /*-----------------------------------------------------------*/
2656 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2659 Queue_t * const pxQueue = xQueue;
2661 traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2663 configASSERT( pxQueue );
2665 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2674 traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2677 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2678 /*-----------------------------------------------------------*/
2680 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2684 taskENTER_CRITICAL();
2686 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2695 taskEXIT_CRITICAL();
2699 /*-----------------------------------------------------------*/
2701 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2704 Queue_t * const pxQueue = xQueue;
2706 traceENTER_xQueueIsQueueFullFromISR( xQueue );
2708 configASSERT( pxQueue );
2710 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2719 traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2722 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2723 /*-----------------------------------------------------------*/
2725 #if ( configUSE_CO_ROUTINES == 1 )
2727 BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2728 const void * pvItemToQueue,
2729 TickType_t xTicksToWait )
2732 Queue_t * const pxQueue = xQueue;
2734 traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2736 /* If the queue is already full we may have to block. A critical section
2737 * is required to prevent an interrupt removing something from the queue
2738 * between the check to see if the queue is full and blocking on the queue. */
2739 portDISABLE_INTERRUPTS();
2741 if( prvIsQueueFull( pxQueue ) != pdFALSE )
2743 /* The queue is full - do we want to block or just leave without posting? */
2745 if( xTicksToWait > ( TickType_t ) 0 )
2747 /* As this is called from a coroutine we cannot block directly, but
2748 * return indicating that we need to block. */
2749 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2750 portENABLE_INTERRUPTS();
2751 return errQUEUE_BLOCKED;
2755 portENABLE_INTERRUPTS();
2756 return errQUEUE_FULL;
2760 portENABLE_INTERRUPTS();
2762 portDISABLE_INTERRUPTS();
2764 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2766 /* There is room in the queue, copy the data into the queue. */
2767 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2770 /* Were any co-routines waiting for data to become available? */
2771 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2773 /* In this instance the co-routine could be placed directly
2774 * into the ready list as we are within a critical section.
2775 * Instead the same pending ready list mechanism is used as if
2776 * the event were caused from within an interrupt. */
2777 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2779 /* The co-routine waiting has a higher priority so record
2780 * that a yield might be appropriate. */
2781 xReturn = errQUEUE_YIELD;
2785 mtCOVERAGE_TEST_MARKER();
2790 mtCOVERAGE_TEST_MARKER();
2795 xReturn = errQUEUE_FULL;
2798 portENABLE_INTERRUPTS();
2800 traceRETURN_xQueueCRSend( xReturn );
2805 #endif /* configUSE_CO_ROUTINES */
2806 /*-----------------------------------------------------------*/
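/* Illustrative co-routine sketch, not kernel code: applications do not
 * call xQueueCRSend() directly - the crQUEUE_SEND() macro in croutine.h
 * wraps it and yields the co-routine when errQUEUE_BLOCKED is returned.
 * Names are hypothetical; note that co-routine locals must be static
 * because the stack is not maintained across a yield.
 *
 *     void vProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *     {
 *         static uint32_t ulValue = 0;
 *         static BaseType_t xResult;
 *
 *         ( void ) uxIndex;
 *
 *         crSTART( xHandle );
 *
 *         for( ;; )
 *         {
 *             // xResult is pdPASS if the value was posted to the queue.
 *             crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValue, 0, &xResult );
 *             ulValue++;
 *         }
 *
 *         crEND();
 *     }
 */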
2808 #if ( configUSE_CO_ROUTINES == 1 )
2810 BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2812 TickType_t xTicksToWait )
2815 Queue_t * const pxQueue = xQueue;
2817 traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2819 /* If the queue is already empty we may have to block. A critical section
2820 * is required to prevent an interrupt adding something to the queue
2821 * between the check to see if the queue is empty and blocking on the queue. */
2822 portDISABLE_INTERRUPTS();
2824 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2826 /* There are no messages in the queue, do we want to block or just
2827 * leave with nothing? */
2828 if( xTicksToWait > ( TickType_t ) 0 )
2830 /* As this is a co-routine we cannot block directly, but return
2831 * indicating that we need to block. */
2832 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2833 portENABLE_INTERRUPTS();
2834 return errQUEUE_BLOCKED;
2838 portENABLE_INTERRUPTS();
2839 return errQUEUE_FULL;
2844 mtCOVERAGE_TEST_MARKER();
2847 portENABLE_INTERRUPTS();
2849 portDISABLE_INTERRUPTS();
2851 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2853 /* Data is available from the queue. */
2854 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2856 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2858 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2862 mtCOVERAGE_TEST_MARKER();
2865 --( pxQueue->uxMessagesWaiting );
2866 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2870 /* Were any co-routines waiting for space to become available? */
2871 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2873 /* In this instance the co-routine could be placed directly
2874 * into the ready list as we are within a critical section.
2875 * Instead the same pending ready list mechanism is used as if
2876 * the event were caused from within an interrupt. */
2877 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2879 xReturn = errQUEUE_YIELD;
2883 mtCOVERAGE_TEST_MARKER();
2888 mtCOVERAGE_TEST_MARKER();
2896 portENABLE_INTERRUPTS();
2898 traceRETURN_xQueueCRReceive( xReturn );
2903 #endif /* configUSE_CO_ROUTINES */
2904 /*-----------------------------------------------------------*/
2906 #if ( configUSE_CO_ROUTINES == 1 )
2908 BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2909 const void * pvItemToQueue,
2910 BaseType_t xCoRoutinePreviouslyWoken )
2912 Queue_t * const pxQueue = xQueue;
2914 traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2916 /* Cannot block within an ISR so if there is no space on the queue then
2917 * exit without doing anything. */
2918 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2920 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2922 /* We only want to wake one co-routine per ISR, so check that a
2923 * co-routine has not already been woken. */
2924 if( xCoRoutinePreviouslyWoken == pdFALSE )
2926 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2928 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2934 mtCOVERAGE_TEST_MARKER();
2939 mtCOVERAGE_TEST_MARKER();
2944 mtCOVERAGE_TEST_MARKER();
2949 mtCOVERAGE_TEST_MARKER();
2952 traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2954 return xCoRoutinePreviouslyWoken;
2957 #endif /* configUSE_CO_ROUTINES */
2958 /*-----------------------------------------------------------*/
2960 #if ( configUSE_CO_ROUTINES == 1 )
2962 BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2964 BaseType_t * pxCoRoutineWoken )
2967 Queue_t * const pxQueue = xQueue;
2969 traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2971 /* We cannot block from an ISR, so check there is data available. If
2972 * not then just leave without doing anything. */
2973 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2975 /* Copy the data from the queue. */
2976 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2978 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2980 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2984 mtCOVERAGE_TEST_MARKER();
2987 --( pxQueue->uxMessagesWaiting );
2988 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2990 if( ( *pxCoRoutineWoken ) == pdFALSE )
2992 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2994 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2996 *pxCoRoutineWoken = pdTRUE;
3000 mtCOVERAGE_TEST_MARKER();
3005 mtCOVERAGE_TEST_MARKER();
3010 mtCOVERAGE_TEST_MARKER();
3020 traceRETURN_xQueueCRReceiveFromISR( xReturn );
3025 #endif /* configUSE_CO_ROUTINES */
3026 /*-----------------------------------------------------------*/
3028 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3030 void vQueueAddToRegistry( QueueHandle_t xQueue,
3031 const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3034 QueueRegistryItem_t * pxEntryToWrite = NULL;
3036 traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3038 configASSERT( xQueue );
3040 if( pcQueueName != NULL )
3042 /* See if there is an empty space in the registry. A NULL name denotes a free slot. */
3044 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3046 /* Replace an existing entry if the queue is already in the registry. */
3047 if( xQueue == xQueueRegistry[ ux ].xHandle )
3049 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3052 /* Otherwise, store in the next empty location. */
3053 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3055 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3059 mtCOVERAGE_TEST_MARKER();
3064 if( pxEntryToWrite != NULL )
3066 /* Store the information on this queue. */
3067 pxEntryToWrite->pcQueueName = pcQueueName;
3068 pxEntryToWrite->xHandle = xQueue;
3070 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3073 traceRETURN_vQueueAddToRegistry();
3076 #endif /* configQUEUE_REGISTRY_SIZE */
3077 /*-----------------------------------------------------------*/
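/* Illustrative sketch, not kernel code: registering a queue only makes
 * its name visible to kernel-aware debuggers and has no effect on queue
 * behaviour. Because only the pointer is stored above, the name must stay
 * valid for as long as the queue remains registered; a string literal is
 * the usual choice.
 *
 *     vQueueAddToRegistry( xQueue, "RxQueue" );
 *     ...
 *     vQueueUnregisterQueue( xQueue );   // before deleting the queue
 */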
3079 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3081 const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3084 const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3086 traceENTER_pcQueueGetName( xQueue );
3088 configASSERT( xQueue );
3090 /* Note there is nothing here to protect against another task adding or
3091 * removing entries from the registry while it is being searched. */
3093 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3095 if( xQueueRegistry[ ux ].xHandle == xQueue )
3097 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3102 mtCOVERAGE_TEST_MARKER();
3106 traceRETURN_pcQueueGetName( pcReturn );
3109 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
3111 #endif /* configQUEUE_REGISTRY_SIZE */
3112 /*-----------------------------------------------------------*/
3114 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3116 void vQueueUnregisterQueue( QueueHandle_t xQueue )
3120 traceENTER_vQueueUnregisterQueue( xQueue );
3122 configASSERT( xQueue );
3124 /* See if the handle of the queue being unregistered is actually in the registry. */
3126 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3128 if( xQueueRegistry[ ux ].xHandle == xQueue )
3130 /* Set the name to NULL to show that this slot is free again. */
3131 xQueueRegistry[ ux ].pcQueueName = NULL;
3133 /* Set the handle to NULL to ensure the same queue handle cannot
3134 * appear in the registry twice if it is added, removed, then added again. */
3136 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3141 mtCOVERAGE_TEST_MARKER();
3145 traceRETURN_vQueueUnregisterQueue();
3146 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
3148 #endif /* configQUEUE_REGISTRY_SIZE */
3149 /*-----------------------------------------------------------*/
3151 #if ( configUSE_TIMERS == 1 )
3153 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3154 TickType_t xTicksToWait,
3155 const BaseType_t xWaitIndefinitely )
3157 Queue_t * const pxQueue = xQueue;
3159 traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3161 /* This function should not be called by application code hence the
3162 * 'Restricted' in its name. It is not part of the public API. It is
3163 * designed for use by kernel code, and has special calling requirements.
3164 * It can result in vListInsert() being called on a list that can only
3165 * possibly ever have one item in it, so the list will be fast, but even
3166 * so it should be called with the scheduler locked and not from a critical section. */
3169 /* Only do anything if there are no messages in the queue. This function
3170 * will not actually cause the task to block, just place it on a blocked
3171 * list. It will not block until the scheduler is unlocked - at which
3172 * time a yield will be performed. If an item is added to the queue while
3173 * the queue is locked, and the calling task blocks on the queue, then the
3174 * calling task will be immediately unblocked when the queue is unlocked. */
3175 prvLockQueue( pxQueue );
3177 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3179 /* There is nothing in the queue, block for the specified period. */
3180 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3184 mtCOVERAGE_TEST_MARKER();
3187 prvUnlockQueue( pxQueue );
3189 traceRETURN_vQueueWaitForMessageRestricted();
3192 #endif /* configUSE_TIMERS */
3193 /*-----------------------------------------------------------*/
3195 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3197 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3199 QueueSetHandle_t pxQueue;
3201 traceENTER_xQueueCreateSet( uxEventQueueLength );
3203 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3205 traceRETURN_xQueueCreateSet( pxQueue );
3210 #endif /* configUSE_QUEUE_SETS */
3211 /*-----------------------------------------------------------*/
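/* Illustrative application-side sketch, not kernel code: a set must be
 * sized to hold one event per space in every member, and members must be
 * empty when added. Names and lengths are hypothetical.
 *
 *     // Length 10 queue plus a binary semaphore: 10 + 1 events.
 *     QueueSetHandle_t xSet = xQueueCreateSet( 10 + 1 );
 *     QueueSetMemberHandle_t xActivated;
 *     uint32_t ulReceived;
 *
 *     ( void ) xQueueAddToSet( xQueue, xSet );
 *     ( void ) xQueueAddToSet( xSemaphore, xSet );
 *
 *     xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *
 *     if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
 *     {
 *         // Guaranteed not to block - the event has already been posted.
 *         ( void ) xQueueReceive( xQueue, &ulReceived, 0 );
 *     }
 */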
3213 #if ( configUSE_QUEUE_SETS == 1 )
3215 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3216 QueueSetHandle_t xQueueSet )
3220 traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3222 taskENTER_CRITICAL();
3224 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3226 /* Cannot add a queue/semaphore to more than one queue set. */
3229 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3231 /* Cannot add a queue/semaphore to a queue set if there are already
3232 * items in the queue/semaphore. */
3237 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3241 taskEXIT_CRITICAL();
3243 traceRETURN_xQueueAddToSet( xReturn );
3248 #endif /* configUSE_QUEUE_SETS */
3249 /*-----------------------------------------------------------*/
3251 #if ( configUSE_QUEUE_SETS == 1 )
3253 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3254 QueueSetHandle_t xQueueSet )
3257 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3259 traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3261 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3263 /* The queue was not a member of the set. */
3266 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3268 /* It is dangerous to remove a queue from a set when the queue is
3269 * not empty because the queue set will still hold pending events for the queue. */
3275 taskENTER_CRITICAL();
3277 /* The queue is no longer contained in the set. */
3278 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3280 taskEXIT_CRITICAL();
3284 traceRETURN_xQueueRemoveFromSet( xReturn );
3287 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
3289 #endif /* configUSE_QUEUE_SETS */
3290 /*-----------------------------------------------------------*/
3292 #if ( configUSE_QUEUE_SETS == 1 )
3294 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3295 TickType_t const xTicksToWait )
3297 QueueSetMemberHandle_t xReturn = NULL;
3299 traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3301 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
3303 traceRETURN_xQueueSelectFromSet( xReturn );
3308 #endif /* configUSE_QUEUE_SETS */
3309 /*-----------------------------------------------------------*/
3311 #if ( configUSE_QUEUE_SETS == 1 )
3313 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3315 QueueSetMemberHandle_t xReturn = NULL;
3317 traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3319 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
3321 traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3326 #endif /* configUSE_QUEUE_SETS */
3327 /*-----------------------------------------------------------*/
3329 #if ( configUSE_QUEUE_SETS == 1 )
3331 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3333 Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3334 BaseType_t xReturn = pdFALSE;
3336 /* This function must be called from a critical section. */
3338 /* The following line is not reachable in unit tests because every call
3339 * to prvNotifyQueueSetContainer is preceded by a check that
3340 * pxQueueSetContainer != NULL */
3341 configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3342 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3344 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3346 const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3348 traceQUEUE_SET_SEND( pxQueueSetContainer );
3350 /* The data copied is the handle of the queue that contains data. */
3351 xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3353 if( cTxLock == queueUNLOCKED )
3355 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3357 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3359 /* The task waiting has a higher priority. */
3364 mtCOVERAGE_TEST_MARKER();
3369 mtCOVERAGE_TEST_MARKER();
3374 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3379 mtCOVERAGE_TEST_MARKER();
3385 #endif /* configUSE_QUEUE_SETS */