/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )
/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area.  When the
 * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
 * not necessary, and the pcHead pointer is set to NULL to indicate that the
 * structure instead holds a pointer to the mutex holder (if any).  Map alternative
 * names to the pcHead and structure member to ensure the readability of the code
 * is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
 * a union as their usage is mutually exclusive dependent on what the queue is
 * being used for. */
#define uxQueueType            pcHead
#define queueQUEUE_IS_MUTEX    NULL
typedef struct QueuePointers
{
    int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #if ( configNUMBER_OF_CORES == 1 )
        #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
    #else /* #if ( configNUMBER_OF_CORES == 1 ) */
        #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
    #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
#endif /* configUSE_PREEMPTION */
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    int8_t * pcHead;    /**< Points to the beginning of the queue storage area. */
    int8_t * pcWriteTo; /**< Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
    } u;

    List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
    UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /**< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition * pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif
} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
 * name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

/* The type stored within the queue registry array.  This allows a name
 * to be assigned to each queue making kernel aware debugging a little
 * more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
 * new QueueRegistryItem_t name below to enable the use of older kernel aware
 * debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
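
/* Illustrative usage sketch, not part of the kernel source: an application
 * can make a queue visible by name to a kernel aware debugger using the
 * registry functions declared in queue.h (available when
 * configQUEUE_REGISTRY_SIZE > 0).  The queue name below is an example only:
 *
 *     QueueHandle_t xRxQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *     if( xRxQueue != NULL )
 *     {
 *         vQueueAddToRegistry( xRxQueue, "RxQueue" );
 *     }
 */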
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
                                      const void * pvItemToQueue,
                                      const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue,
                                  void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )

/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if ( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if ( configUSE_MUTEXES == 1 )

/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex.  This function returns
 * that priority.
 */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()
/*
 * Macro to increment cTxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )

/*
 * Macro to increment cRxLock member of the queue data structure.  It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
    do {                                                                      \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    } while( 0 )
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                               BaseType_t xNewQueue )
{
    BaseType_t xReturn = pdPASS;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericReset( xQueue, xNewQueue );

    configASSERT( pxQueue );

    if( ( pxQueue != NULL ) &&
        ( pxQueue->uxLength >= 1U ) &&
        /* Check for multiplication overflow. */
        ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
    {
        taskENTER_CRITICAL();
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxQueue->pcWriteTo = pxQueue->pcHead;
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;

            if( xNewQueue == pdFALSE )
            {
                /* If there are tasks blocked waiting to read from the queue, then
                 * the tasks will remain blocked as after this function exits the queue
                 * will still be empty.  If there are tasks blocked waiting to write to
                 * the queue, then one should be unblocked as after this function exits
                 * it will be possible to write to it. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Ensure the event queues start in the correct state. */
                vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
                vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
            }
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        xReturn = pdFAIL;
    }

    configASSERT( xReturn != pdFAIL );

    /* A value is returned for calling semantic consistency with previous
     * versions. */
    traceRETURN_xQueueGenericReset( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
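
/* Illustrative usage sketch, not part of the kernel source: application code
 * normally reaches xQueueGenericReset() through the xQueueReset() macro
 * declared in queue.h:
 *
 *     ( void ) xQueueReset( xQueue );
 *
 * After the call the queue is empty again; if a task was blocked waiting to
 * send, one sender is unblocked. */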
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
                                             const UBaseType_t uxItemSize,
                                             uint8_t * pucQueueStorage,
                                             StaticQueue_t * pxStaticQueue,
                                             const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;

        traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );

        /* The StaticQueue_t structure and the queue storage area must be
         * supplied. */
        configASSERT( pxStaticQueue );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            ( pxStaticQueue != NULL ) &&

            /* A queue storage area should be provided if the item size is not 0, and
             * should not be provided if the item size is 0. */
            ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
            ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
        {
            #if ( configASSERT_DEFINED == 1 )
            {
                /* Sanity check that the size of the structure used to declare a
                 * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
                 * the real queue and semaphore structures. */
                volatile size_t xSize = sizeof( StaticQueue_t );

                /* This assertion cannot be branch covered in unit tests */
                configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
                ( void ) xSize;                             /* Keeps lint quiet when configASSERT() is not defined. */
            }
            #endif /* configASSERT_DEFINED */

            /* The address of a statically allocated queue was passed in, use it.
             * The address of a statically allocated storage area was also passed in
             * but is already set. */
            pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                 * note this queue was allocated statically in case the queue is
                 * later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreateStatic( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
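
/* Illustrative usage sketch, not part of the kernel source: application code
 * normally reaches this function through the xQueueCreateStatic() macro in
 * queue.h.  The caller owns both buffers; the names below are examples only:
 *
 *     #define QUEUE_LENGTH    10
 *     #define ITEM_SIZE       sizeof( uint32_t )
 *
 *     static StaticQueue_t xQueueBuffer;
 *     static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
 *
 *     QueueHandle_t xQueue = xQueueCreateStatic( QUEUE_LENGTH,
 *                                                ITEM_SIZE,
 *                                                ucQueueStorage,
 *                                                &xQueueBuffer );
 */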
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
                                              uint8_t ** ppucQueueStorage,
                                              StaticQueue_t ** ppxStaticQueue )
    {
        BaseType_t xReturn;
        Queue_t * const pxQueue = xQueue;

        traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );

        configASSERT( pxQueue );
        configASSERT( ppxStaticQueue );

        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        {
            /* Check if the queue was statically allocated. */
            if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
            {
                if( ppucQueueStorage != NULL )
                {
                    *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
                }

                *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* configSUPPORT_DYNAMIC_ALLOCATION */
        {
            /* Queue must have been statically allocated. */
            if( ppucQueueStorage != NULL )
            {
                *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
            }

            *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
            xReturn = pdTRUE;
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        traceRETURN_xQueueGenericGetStaticBuffers( xReturn );

        return xReturn;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
                                       const UBaseType_t uxItemSize,
                                       const uint8_t ucQueueType )
    {
        Queue_t * pxNewQueue = NULL;
        size_t xQueueSizeInBytes;
        uint8_t * pucQueueStorage;

        traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );

        if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
            /* Check for multiplication overflow. */
            ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
            /* Check for addition overflow. */
            ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
        {
            /* Allocate enough space to hold the maximum number of items that
             * can be in the queue at any time.  It is valid for uxItemSize to be
             * zero in the case the queue is used as a semaphore. */
            xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );

            /* Allocate the queue and storage area.  Justification for MISRA
             * deviation as follows:  pvPortMalloc() always ensures returned memory
             * blocks are aligned per the requirements of the MCU stack.  In this case
             * pvPortMalloc() must return a pointer that is guaranteed to meet the
             * alignment requirements of the Queue_t structure - which in this case
             * is an int8_t *.  Therefore, whenever the stack alignment requirements
             * are greater than or equal to the pointer to char requirements the cast
             * is safe.  In other cases alignment requirements are not strict (one or
             * two bytes). */
            pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

            if( pxNewQueue != NULL )
            {
                /* Jump past the queue structure to find the location of the queue
                 * storage area. */
                pucQueueStorage = ( uint8_t * ) pxNewQueue;
                pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

                #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                {
                    /* Queues can be created either statically or dynamically, so
                     * note this queue was created dynamically in case it is later
                     * deleted. */
                    pxNewQueue->ucStaticallyAllocated = pdFALSE;
                }
                #endif /* configSUPPORT_STATIC_ALLOCATION */

                prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
            }
            else
            {
                traceQUEUE_CREATE_FAILED( ucQueueType );
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            configASSERT( pxNewQueue );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueGenericCreate( pxNewQueue );

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
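
/* Illustrative usage sketch, not part of the kernel source: application code
 * normally reaches xQueueGenericCreate() through the xQueueCreate() macro in
 * queue.h.  For example, a queue that holds up to ten 32-bit values:
 *
 *     QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *     configASSERT( xQueue != NULL );
 */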
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
     * configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
         * be set to NULL because NULL is used as a key to say the queue is used as
         * a mutex.  Therefore just set pcHead to point to the queue as a benign
         * value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
     * defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if ( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/
#if ( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t * pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
             * correctly for a generic queue, but this function is creating a
             * mutex.  Overwrite those members that need to be set differently -
             * in particular the information required for priority inheritance. */
            pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutex( ucQueueType );

        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutex( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
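
/* Illustrative usage sketch, not part of the kernel source: application code
 * creates mutexes with the xSemaphoreCreateMutex() macro from semphr.h, which
 * wraps xQueueCreateMutex():
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 * When xMutex is not NULL it can be taken and given with xSemaphoreTake()
 * and xSemaphoreGive(). */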
#if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
                                           StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xNewQueue;
        const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );

        /* Prevent compiler warnings about unused parameters if
         * configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        traceRETURN_xQueueCreateMutexStatic( xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;
        Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

        traceENTER_xQueueGetMutexHolder( xSemaphore );

        configASSERT( xSemaphore );

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
         * be called directly.  Note:  This is a good way of determining if the
         * calling task is the mutex holder, but not a good way of determining the
         * identity of the mutex holder, as the holder may change between the
         * following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_xQueueGetMutexHolder( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
        TaskHandle_t pxReturn;

        traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
         * holder should not change in an ISR, and therefore a critical section is
         * not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueGiveMutexRecursive( xMutex );

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then xMutexHolder will not
         * change outside of this task.  If this task does not hold the mutex then
         * pxMutexHolder can never coincidentally equal the tasks handle, and as
         * this is the only condition we are interested in it does not matter if
         * pxMutexHolder is accessed simultaneously by another task.  Therefore no
         * mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
             * the task handle, therefore no underflow check is required.  Also,
             * uxRecursiveCallCount is only modified by the mutex holder, and as
             * there can only be one, no mutual exclusion is required to modify the
             * uxRecursiveCallCount member. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                 * task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
             * holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        traceRETURN_xQueueGiveMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                         TickType_t xTicksToWait )
    {
        BaseType_t xReturn;
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
         * xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
             * obtained.  The calling task may have entered the Blocked state
             * before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        traceRETURN_xQueueTakeMutexRecursive( xReturn );

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
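
/* Illustrative usage sketch, not part of the kernel source: the recursive
 * take/give pair is normally reached through the semphr.h macros.  The holder
 * must give the mutex once for every successful take before it is released:
 *
 *     SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *     if( xSemaphoreTakeRecursive( xRecMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *     {
 *         ( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *     }
 */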
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
                                                       const UBaseType_t uxInitialCount,
                                                       StaticQueue_t * pxStaticQueue )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
#if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                                 const UBaseType_t uxInitialCount )
    {
        QueueHandle_t xHandle = NULL;

        traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );

        if( ( uxMaxCount != 0U ) &&
            ( uxInitialCount <= uxMaxCount ) )
        {
            xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

            if( xHandle != NULL )
            {
                ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

                traceCREATE_COUNTING_SEMAPHORE();
            }
            else
            {
                traceCREATE_COUNTING_SEMAPHORE_FAILED();
            }
        }
        else
        {
            configASSERT( xHandle );
            mtCOVERAGE_TEST_MARKER();
        }

        traceRETURN_xQueueCreateCountingSemaphore( xHandle );

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
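
/* Illustrative usage sketch, not part of the kernel source: counting
 * semaphores are normally created with the xSemaphoreCreateCounting() macro
 * from semphr.h.  For example, a semaphore that counts up to five events and
 * starts empty:
 *
 *     SemaphoreHandle_t xEvents = xSemaphoreCreateCounting( 5, 0 );
 */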
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to
                                 * do this from within the critical section - the
                                 * kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes
                             * and the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                     * queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                             * our own so yield immediately.  Yes it is ok to do
                             * this from within the critical section - the kernel
                             * takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                         * executed if the task was holding multiple mutexes and
                         * the mutexes were given back in an order that is
                         * different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                traceRETURN_xQueueGenericSend( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    traceRETURN_xQueueGenericSend( errQUEUE_FULL );

                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list.  It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            traceRETURN_xQueueGenericSend( errQUEUE_FULL );

            return errQUEUE_FULL;
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
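
/* Illustrative usage sketch, not part of the kernel source: tasks normally
 * reach xQueueGenericSend() through the xQueueSend(), xQueueSendToBack(),
 * xQueueSendToFront() and xQueueOverwrite() macros in queue.h.  For example,
 * blocking for up to 100ms while waiting for space:
 *
 *     uint32_t ulValue = 10UL;
 *
 *     if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *     {
 *         The queue remained full for 100ms - handle the error here.
 *     }
 */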
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue.  Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             * semaphore or mutex.  That means prvCopyDataToQueue() cannot result
             * in a task disinheriting a priority and prvCopyDataToQueue() can be
             * called here even though the disinherit function does not check if
             * the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    ( void ) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGenericSendFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
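
/* Illustrative usage sketch, not part of the kernel source: an interrupt
 * handler posts through xQueueSendFromISR() (a queue.h macro over this
 * function) and requests a context switch on exit if a higher priority task
 * was woken.  vExampleISR() and ulReadDataFromPeripheral() are hypothetical
 * names:
 *
 *     void vExampleISR( void )
 *     {
 *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *         uint32_t ulReceived = ulReadDataFromPeripheral();
 *
 *         ( void ) xQueueSendFromISR( xQueue, &ulReceived, &xHigherPriorityTaskWoken );
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */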
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0.  Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupt, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR.  As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed.  Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock( pxQueue, cTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xQueueGiveFromISR( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/
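
/* Illustrative usage sketch, not part of the kernel source: the semphr.h
 * macro xSemaphoreGiveFromISR() resolves to this function:
 *
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     ( void ) xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 */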
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue?  If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                traceRETURN_xQueueReceive( pdPASS );

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );

                if( xTaskResumeAll() == pdFALSE )
                {
                    taskYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                traceRETURN_xQueueReceive( errQUEUE_EMPTY );

                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
} /*lint -restore */
/*-----------------------------------------------------------*/
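
/* Illustrative usage sketch, not part of the kernel source: a typical
 * consumer task blocks on the queue and processes items as they arrive.
 * vConsumerTask() and vProcessItem() are hypothetical names:
 *
 *     void vConsumerTask( void * pvParameters )
 *     {
 *         uint32_t ulItem;
 *
 *         for( ; ; )
 *         {
 *             if( xQueueReceive( xQueue, &ulItem, portMAX_DELAY ) == pdPASS )
 *             {
 *                 vProcessItem( ulItem );
 *             }
 *         }
 *     }
 */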
1649 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1650 TickType_t xTicksToWait )
1652 BaseType_t xEntryTimeSet = pdFALSE;
1654 Queue_t * const pxQueue = xQueue;
1656 #if ( configUSE_MUTEXES == 1 )
1657 BaseType_t xInheritanceOccurred = pdFALSE;
1660 traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1662 /* Check the queue pointer is not NULL. */
1663 configASSERT( ( pxQueue ) );
1665 /* Check this really is a semaphore, in which case the item size will be
1667 configASSERT( pxQueue->uxItemSize == 0 );
1669 /* Cannot block if the scheduler is suspended. */
1670 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1672 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1676 /*lint -save -e904 This function relaxes the coding standard somewhat to allow return
1677 * statements within the function itself. This is done in the interest
1678 * of execution time efficiency. */
1681 taskENTER_CRITICAL();
1683 /* Semaphores are queues with an item size of 0, and where the
1684 * number of messages in the queue is the semaphore's count value. */
1685 const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1687 /* Is there data in the queue now? To be running the calling task
1688 * must be the highest priority task wanting to access the queue. */
1689 if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1691 traceQUEUE_RECEIVE( pxQueue );
1693 /* Semaphores are queues with a data size of zero and where the
1694 * messages waiting is the semaphore's count. Reduce the count. */
1695 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1697 #if ( configUSE_MUTEXES == 1 )
1699 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1701 /* Record the information required to implement
1702 * priority inheritance should it become necessary. */
1703 pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1707 mtCOVERAGE_TEST_MARKER();
1710 #endif /* configUSE_MUTEXES */
1712 /* Check to see if other tasks are blocked waiting to give the
1713 * semaphore, and if so, unblock the highest priority such task. */
1714 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1716 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1718 queueYIELD_IF_USING_PREEMPTION();
1722 mtCOVERAGE_TEST_MARKER();
1727 mtCOVERAGE_TEST_MARKER();
1730 taskEXIT_CRITICAL();
1732 traceRETURN_xQueueSemaphoreTake( pdPASS );
1738 if( xTicksToWait == ( TickType_t ) 0 )
1740 /* The semaphore count was 0 and no block time is specified
1741 * (or the block time has expired) so exit now. */
1742 taskEXIT_CRITICAL();
1744 traceQUEUE_RECEIVE_FAILED( pxQueue );
1745 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1747 return errQUEUE_EMPTY;
1749 else if( xEntryTimeSet == pdFALSE )
1751 /* The semaphore count was 0 and a block time was specified
1752 * so configure the timeout structure ready to block. */
1753 vTaskInternalSetTimeOutState( &xTimeOut );
1754 xEntryTimeSet = pdTRUE;
1758 /* Entry time was already set. */
1759 mtCOVERAGE_TEST_MARKER();
1763 taskEXIT_CRITICAL();
1765 /* Interrupts and other tasks can give to and take from the semaphore
1766 * now the critical section has been exited. */
1769 prvLockQueue( pxQueue );
1771 /* Update the timeout state to see if it has expired yet. */
1772 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1774 /* A block time is specified and not expired. If the semaphore
1775 * count is 0 then enter the Blocked state to wait for a semaphore to
1776 * become available. As semaphores are implemented with queues the
1777 * queue being empty is equivalent to the semaphore count being 0. */
1778 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1780 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1782 #if ( configUSE_MUTEXES == 1 )
1784 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1786 taskENTER_CRITICAL();
1788 xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1790 taskEXIT_CRITICAL();
1794 mtCOVERAGE_TEST_MARKER();
1797 #endif /* if ( configUSE_MUTEXES == 1 ) */
1799 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1800 prvUnlockQueue( pxQueue );
1802 if( xTaskResumeAll() == pdFALSE )
1804 taskYIELD_WITHIN_API();
1808 mtCOVERAGE_TEST_MARKER();
1813 /* There was no timeout and the semaphore count was not 0, so
1814 * attempt to take the semaphore again. */
1815 prvUnlockQueue( pxQueue );
1816 ( void ) xTaskResumeAll();
1822 prvUnlockQueue( pxQueue );
1823 ( void ) xTaskResumeAll();
1825 /* If the semaphore count is 0 exit now as the timeout has
1826 * expired. Otherwise return to attempt to take the semaphore that is
1827 * known to be available. As semaphores are implemented by queues the
1828 * queue being empty is equivalent to the semaphore count being 0. */
1829 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1831 #if ( configUSE_MUTEXES == 1 )
1833 /* xInheritanceOccurred could only have be set if
1834 * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1835 * test the mutex type again to check it is actually a mutex. */
1836 if( xInheritanceOccurred != pdFALSE )
1838 taskENTER_CRITICAL();
1840 UBaseType_t uxHighestWaitingPriority;
1842 /* This task blocking on the mutex caused another
1843 * task to inherit this task's priority. Now this task
1844 * has timed out, the priority should be disinherited
1845 * again, but only as low as the next highest priority
1846 * task that is waiting for the same mutex. */
1847 uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1848 vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1850 taskEXIT_CRITICAL();
1853 #endif /* configUSE_MUTEXES */
1855 traceQUEUE_RECEIVE_FAILED( pxQueue );
1856 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1858 return errQUEUE_EMPTY;
1862 mtCOVERAGE_TEST_MARKER();
1865 } /*lint -restore */
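/* Illustrative usage sketch (not part of the kernel source): taking and
 * giving a mutex with a 100ms timeout. xSemaphoreTake() resolves to
 * xQueueSemaphoreTake() for mutex handles. Assumes semphr.h is included,
 * configUSE_MUTEXES and configSUPPORT_DYNAMIC_ALLOCATION are 1, and the
 * scheduler is running.
 *
 *  SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *  if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
 *  {
 *      ...access the protected resource; while higher priority tasks are
 *      blocked on the mutex the holder may have inherited their priority...
 *      xSemaphoreGive( xMutex );
 *  }
 *  else
 *  {
 *      ...timed out without obtaining the mutex...
 *  }
 */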
1867 /*-----------------------------------------------------------*/
1869 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1870 void * const pvBuffer,
1871 TickType_t xTicksToWait )
1873 BaseType_t xEntryTimeSet = pdFALSE;
1875 int8_t * pcOriginalReadPosition;
1876 Queue_t * const pxQueue = xQueue;
1878 traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1880 /* Check the pointer is not NULL. */
1881 configASSERT( ( pxQueue ) );
1883 /* The buffer into which data is received can only be NULL if the data size
1884 * is zero (so no data is copied into the buffer). */
1885 configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1887 /* Cannot block if the scheduler is suspended. */
1888 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1890 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1894 /*lint -save -e904 This function relaxes the coding standard somewhat to
1895 * allow return statements within the function itself. This is done in the
1896 * interest of execution time efficiency. */
1899 taskENTER_CRITICAL();
1901 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1903 /* Is there data in the queue now? To be running the calling task
1904 * must be the highest priority task wanting to access the queue. */
1905 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1907 /* Remember the read position so it can be reset after the data
1908 * is read from the queue as this function is only peeking the
1909 * data, not removing it. */
1910 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1912 prvCopyDataFromQueue( pxQueue, pvBuffer );
1913 traceQUEUE_PEEK( pxQueue );
1915 /* The data is not being removed, so reset the read pointer. */
1916 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1918 /* The data is being left in the queue, so see if there are
1919 * any other tasks waiting for the data. */
1920 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1922 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1924 /* The task waiting has a higher priority than this task. */
1925 queueYIELD_IF_USING_PREEMPTION();
1929 mtCOVERAGE_TEST_MARKER();
1934 mtCOVERAGE_TEST_MARKER();
1937 taskEXIT_CRITICAL();
1939 traceRETURN_xQueuePeek( pdPASS );
1945 if( xTicksToWait == ( TickType_t ) 0 )
1947 /* The queue was empty and no block time is specified (or
1948 * the block time has expired) so leave now. */
1949 taskEXIT_CRITICAL();
1951 traceQUEUE_PEEK_FAILED( pxQueue );
1952 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1954 return errQUEUE_EMPTY;
1956 else if( xEntryTimeSet == pdFALSE )
1958 /* The queue was empty and a block time was specified so
1959 * configure the timeout structure ready to enter the blocked state. */
1961 vTaskInternalSetTimeOutState( &xTimeOut );
1962 xEntryTimeSet = pdTRUE;
1966 /* Entry time was already set. */
1967 mtCOVERAGE_TEST_MARKER();
1971 taskEXIT_CRITICAL();
1973 /* Interrupts and other tasks can send to and receive from the queue
1974 * now that the critical section has been exited. */
1977 prvLockQueue( pxQueue );
1979 /* Update the timeout state to see if it has expired yet. */
1980 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1982 /* Timeout has not expired yet, check to see if there is data in the
1983 * queue now, and if not enter the Blocked state to wait for data. */
1984 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1986 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1987 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1988 prvUnlockQueue( pxQueue );
1990 if( xTaskResumeAll() == pdFALSE )
1992 taskYIELD_WITHIN_API();
1996 mtCOVERAGE_TEST_MARKER();
2001 /* There is data in the queue now, so don't enter the blocked
2002 * state, instead return to try and obtain the data. */
2003 prvUnlockQueue( pxQueue );
2004 ( void ) xTaskResumeAll();
2009 /* The timeout has expired. If there is still no data in the queue
2010 * exit, otherwise go back and try to read the data again. */
2011 prvUnlockQueue( pxQueue );
2012 ( void ) xTaskResumeAll();
2014 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2016 traceQUEUE_PEEK_FAILED( pxQueue );
2017 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2019 return errQUEUE_EMPTY;
2023 mtCOVERAGE_TEST_MARKER();
2026 } /*lint -restore */
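/* Illustrative usage sketch (not part of the kernel source): peeking the
 * oldest item without removing it. Assumes xQueue was created with
 * xQueueCreate( 8, sizeof( uint32_t ) ).
 *
 *  uint32_t ulValue;
 *
 *  if( xQueuePeek( xQueue, &ulValue, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *  {
 *      ...inspect ulValue; the item remains on the queue, so a subsequent
 *      xQueueReceive() call returns the same item...
 *  }
 */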
2028 /*-----------------------------------------------------------*/
2030 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2031 void * const pvBuffer,
2032 BaseType_t * const pxHigherPriorityTaskWoken )
2035 UBaseType_t uxSavedInterruptStatus;
2036 Queue_t * const pxQueue = xQueue;
2038 traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2040 configASSERT( pxQueue );
2041 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2043 /* RTOS ports that support interrupt nesting have the concept of a maximum
2044 * system call (or maximum API call) interrupt priority. Interrupts that are
2045 * above the maximum system call priority are kept permanently enabled, even
2046 * when the RTOS kernel is in a critical section, but cannot make any calls to
2047 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2048 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2049 * failure if a FreeRTOS API function is called from an interrupt that has been
2050 * assigned a priority above the configured maximum system call priority.
2051 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2052 * that have been assigned a priority at or (logically) below the maximum
2053 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2054 * safe API to ensure interrupt entry is as fast and as simple as possible.
2055 * More information (albeit Cortex-M specific) is provided on the following
2056 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2057 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2059 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2061 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2063 /* Cannot block in an ISR, so check there is data available. */
2064 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2066 const int8_t cRxLock = pxQueue->cRxLock;
2068 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2070 prvCopyDataFromQueue( pxQueue, pvBuffer );
2071 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2073 /* If the queue is locked the event list will not be modified.
2074 * Instead update the lock count so the task that unlocks the queue
2075 * will know that an ISR has removed data while the queue was locked. */
2077 if( cRxLock == queueUNLOCKED )
2079 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2081 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2083 /* The task waiting has a higher priority than us so
2084 * force a context switch. */
2085 if( pxHigherPriorityTaskWoken != NULL )
2087 *pxHigherPriorityTaskWoken = pdTRUE;
2091 mtCOVERAGE_TEST_MARKER();
2096 mtCOVERAGE_TEST_MARKER();
2101 mtCOVERAGE_TEST_MARKER();
2106 /* Increment the lock count so the task that unlocks the queue
2107 * knows that data was removed while it was locked. */
2108 prvIncrementQueueRxLock( pxQueue, cRxLock );
2116 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2119 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2121 traceRETURN_xQueueReceiveFromISR( xReturn );
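/* Illustrative usage sketch (not part of the kernel source): draining a
 * queue from an interrupt. vExampleISR and xQueue are hypothetical names;
 * the interrupt priority must be at or (logically) below
 * configMAX_SYSCALL_INTERRUPT_PRIORITY, and portYIELD_FROM_ISR() is
 * assumed to be provided by the port.
 *
 *  void vExampleISR( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *      char cByte;
 *
 *      while( xQueueReceiveFromISR( xQueue, &cByte, &xHigherPriorityTaskWoken ) == pdPASS )
 *      {
 *          ...process cByte...
 *      }
 *
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */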
2125 /*-----------------------------------------------------------*/
2127 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2128 void * const pvBuffer )
2131 UBaseType_t uxSavedInterruptStatus;
2132 int8_t * pcOriginalReadPosition;
2133 Queue_t * const pxQueue = xQueue;
2135 traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2137 configASSERT( pxQueue );
2138 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2139 configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2141 /* RTOS ports that support interrupt nesting have the concept of a maximum
2142 * system call (or maximum API call) interrupt priority. Interrupts that are
2143 * above the maximum system call priority are kept permanently enabled, even
2144 * when the RTOS kernel is in a critical section, but cannot make any calls to
2145 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2146 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2147 * failure if a FreeRTOS API function is called from an interrupt that has been
2148 * assigned a priority above the configured maximum system call priority.
2149 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2150 * that have been assigned a priority at or (logically) below the maximum
2151 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2152 * safe API to ensure interrupt entry is as fast and as simple as possible.
2153 * More information (albeit Cortex-M specific) is provided on the following
2154 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2155 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2157 uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2159 /* Cannot block in an ISR, so check there is data available. */
2160 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2162 traceQUEUE_PEEK_FROM_ISR( pxQueue );
2164 /* Remember the read position so it can be reset as nothing is
2165 * actually being removed from the queue. */
2166 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2167 prvCopyDataFromQueue( pxQueue, pvBuffer );
2168 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2175 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2178 taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2180 traceRETURN_xQueuePeekFromISR( xReturn );
2184 /*-----------------------------------------------------------*/
2186 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2188 UBaseType_t uxReturn;
2190 traceENTER_uxQueueMessagesWaiting( xQueue );
2192 configASSERT( xQueue );
2194 taskENTER_CRITICAL();
2196 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2198 taskEXIT_CRITICAL();
2200 traceRETURN_uxQueueMessagesWaiting( uxReturn );
2203 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2204 /*-----------------------------------------------------------*/
2206 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2208 UBaseType_t uxReturn;
2209 Queue_t * const pxQueue = xQueue;
2211 traceENTER_uxQueueSpacesAvailable( xQueue );
2213 configASSERT( pxQueue );
2215 taskENTER_CRITICAL();
2217 uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2219 taskEXIT_CRITICAL();
2221 traceRETURN_uxQueueSpacesAvailable( uxReturn );
2224 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
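/* Note (illustrative, not part of the kernel source): for a queue of
 * length N, uxQueueMessagesWaiting() plus uxQueueSpacesAvailable() equals
 * N at any single instant, although the two values can change between two
 * separate calls. A hypothetical back-pressure check:
 *
 *  if( uxQueueSpacesAvailable( xQueue ) == ( UBaseType_t ) 0 )
 *  {
 *      ...the queue is currently full - drop, log or throttle...
 *  }
 */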
2225 /*-----------------------------------------------------------*/
2227 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2229 UBaseType_t uxReturn;
2230 Queue_t * const pxQueue = xQueue;
2232 traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2234 configASSERT( pxQueue );
2235 uxReturn = pxQueue->uxMessagesWaiting;
2237 traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2240 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
2241 /*-----------------------------------------------------------*/
2243 void vQueueDelete( QueueHandle_t xQueue )
2245 Queue_t * const pxQueue = xQueue;
2247 traceENTER_vQueueDelete( xQueue );
2249 configASSERT( pxQueue );
2250 traceQUEUE_DELETE( pxQueue );
2252 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2254 vQueueUnregisterQueue( pxQueue );
2258 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2260 /* The queue can only have been allocated dynamically - free it now. */
2262 vPortFree( pxQueue );
2264 #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2266 /* The queue could have been allocated statically or dynamically, so
2267 * check before attempting to free the memory. */
2268 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2270 vPortFree( pxQueue );
2274 mtCOVERAGE_TEST_MARKER();
2277 #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2279 /* The queue must have been statically allocated, so is not going to be
2280 * deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
2283 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2285 traceRETURN_vQueueDelete();
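/* Illustrative usage sketch (not part of the kernel source): deleting a
 * dynamically allocated queue once no task or interrupt can still be
 * using its handle.
 *
 *  QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *  ...
 *  vQueueDelete( xQueue );
 *  xQueue = NULL;  Defensive: the old handle dangles after deletion.
 */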
2287 /*-----------------------------------------------------------*/
2289 #if ( configUSE_TRACE_FACILITY == 1 )
2291 UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2293 traceENTER_uxQueueGetQueueNumber( xQueue );
2295 traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2297 return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2300 #endif /* configUSE_TRACE_FACILITY */
2301 /*-----------------------------------------------------------*/
2303 #if ( configUSE_TRACE_FACILITY == 1 )
2305 void vQueueSetQueueNumber( QueueHandle_t xQueue,
2306 UBaseType_t uxQueueNumber )
2308 traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2310 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2312 traceRETURN_vQueueSetQueueNumber();
2315 #endif /* configUSE_TRACE_FACILITY */
2316 /*-----------------------------------------------------------*/
2318 #if ( configUSE_TRACE_FACILITY == 1 )
2320 uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2322 traceENTER_ucQueueGetQueueType( xQueue );
2324 traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2326 return ( ( Queue_t * ) xQueue )->ucQueueType;
2329 #endif /* configUSE_TRACE_FACILITY */
2330 /*-----------------------------------------------------------*/
2332 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2334 traceENTER_uxQueueGetQueueItemSize( xQueue );
2336 traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2338 return ( ( Queue_t * ) xQueue )->uxItemSize;
2340 /*-----------------------------------------------------------*/
2342 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2344 traceENTER_uxQueueGetQueueLength( xQueue );
2346 traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2348 return ( ( Queue_t * ) xQueue )->uxLength;
2350 /*-----------------------------------------------------------*/
2352 #if ( configUSE_MUTEXES == 1 )
2354 static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2356 UBaseType_t uxHighestPriorityOfWaitingTasks;
2358 /* If a task waiting for a mutex causes the mutex holder to inherit a
2359 * priority, but the waiting task times out, then the holder should
2360 * disinherit the priority - but only down to the highest priority of any
2361 * other tasks that are waiting for the same mutex. For this purpose,
2362 * return the priority of the highest priority task that is waiting for the same mutex. */
2364 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2366 uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2370 uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2373 return uxHighestPriorityOfWaitingTasks;
2376 #endif /* configUSE_MUTEXES */
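/* Worked example (illustrative, not part of the kernel source, assuming
 * configMAX_PRIORITIES is 5): tasks are placed on event lists with an item
 * value of configMAX_PRIORITIES minus their priority, so the list head
 * holds the highest priority waiter. If the head entry's item value is 1,
 * the function above returns 5 - 1 = 4 as the priority to disinherit
 * down to. */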
2377 /*-----------------------------------------------------------*/
2379 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2380 const void * pvItemToQueue,
2381 const BaseType_t xPosition )
2383 BaseType_t xReturn = pdFALSE;
2384 UBaseType_t uxMessagesWaiting;
2386 /* This function is called from a critical section. */
2388 uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2390 if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2392 #if ( configUSE_MUTEXES == 1 )
2394 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2396 /* The mutex is no longer being held. */
2397 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2398 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2402 mtCOVERAGE_TEST_MARKER();
2405 #endif /* configUSE_MUTEXES */
2407 else if( xPosition == queueSEND_TO_BACK )
2409 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2410 pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2412 if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2414 pxQueue->pcWriteTo = pxQueue->pcHead;
2418 mtCOVERAGE_TEST_MARKER();
2423 ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
2424 pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2426 if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2428 pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2432 mtCOVERAGE_TEST_MARKER();
2435 if( xPosition == queueOVERWRITE )
2437 if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2439 /* An item is not being added but overwritten, so subtract
2440 * one from the recorded number of items in the queue so when
2441 * one is added again below the number of recorded items remains correct. */
2443 --uxMessagesWaiting;
2447 mtCOVERAGE_TEST_MARKER();
2452 mtCOVERAGE_TEST_MARKER();
2456 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
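/* Illustrative mapping (not part of the kernel source): the public send
 * functions select the xPosition value used above. Assumes a queue of
 * uint32_t items and no blocking.
 *
 *  xQueueSendToBack( xQueue, &ulValue, 0 );   uses queueSEND_TO_BACK
 *  xQueueSendToFront( xQueue, &ulValue, 0 );  uses queueSEND_TO_FRONT
 *  xQueueOverwrite( xQueue, &ulValue );       uses queueOVERWRITE, and is
 *                                             intended for length 1 queues
 */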
2460 /*-----------------------------------------------------------*/
2462 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2463 void * const pvBuffer )
2465 if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2467 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2469 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
2471 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2475 mtCOVERAGE_TEST_MARKER();
2478 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2481 /*-----------------------------------------------------------*/
2483 static void prvUnlockQueue( Queue_t * const pxQueue )
2485 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2487 /* The lock counts contain the number of extra data items placed or
2488 * removed from the queue while the queue was locked. When a queue is
2489 * locked, items can be added or removed, but the event lists cannot be updated. */
2491 taskENTER_CRITICAL();
2493 int8_t cTxLock = pxQueue->cTxLock;
2495 /* See if data was added to the queue while it was locked. */
2496 while( cTxLock > queueLOCKED_UNMODIFIED )
2498 /* Data was posted while the queue was locked. Are any tasks
2499 * blocked waiting for data to become available? */
2500 #if ( configUSE_QUEUE_SETS == 1 )
2502 if( pxQueue->pxQueueSetContainer != NULL )
2504 if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2506 /* The queue is a member of a queue set, and posting to
2507 * the queue set caused a higher priority task to unblock.
2508 * A context switch is required. */
2513 mtCOVERAGE_TEST_MARKER();
2518 /* Tasks that are removed from the event list will get
2519 * added to the pending ready list as the scheduler is still suspended. */
2521 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2523 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2525 /* The task waiting has a higher priority so record that a
2526 * context switch is required. */
2531 mtCOVERAGE_TEST_MARKER();
2540 #else /* configUSE_QUEUE_SETS */
2542 /* Tasks that are removed from the event list will get added to
2543 * the pending ready list as the scheduler is still suspended. */
2544 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2546 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2548 /* The task waiting has a higher priority so record that
2549 * a context switch is required. */
2554 mtCOVERAGE_TEST_MARKER();
2562 #endif /* configUSE_QUEUE_SETS */
2567 pxQueue->cTxLock = queueUNLOCKED;
2569 taskEXIT_CRITICAL();
2571 /* Do the same for the Rx lock. */
2572 taskENTER_CRITICAL();
2574 int8_t cRxLock = pxQueue->cRxLock;
2576 while( cRxLock > queueLOCKED_UNMODIFIED )
2578 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2580 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2586 mtCOVERAGE_TEST_MARKER();
2597 pxQueue->cRxLock = queueUNLOCKED;
2599 taskEXIT_CRITICAL();
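/* Illustrative sketch (not part of the kernel source) of the pattern the
 * blocking API functions above follow when using the queue locks:
 *
 *  vTaskSuspendAll();
 *  prvLockQueue( pxQueue );    Interrupts still run, but only increment
 *                              cRxLock / cTxLock rather than touch the
 *                              event lists.
 *  ...check the timeout and decide whether to block...
 *  prvUnlockQueue( pxQueue );  Replays any ISR activity onto the event
 *                              lists.
 *  ( void ) xTaskResumeAll();
 */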
2601 /*-----------------------------------------------------------*/
2603 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2607 taskENTER_CRITICAL();
2609 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2618 taskEXIT_CRITICAL();
2622 /*-----------------------------------------------------------*/
2624 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2627 Queue_t * const pxQueue = xQueue;
2629 traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2631 configASSERT( pxQueue );
2633 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2642 traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2645 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2646 /*-----------------------------------------------------------*/
2648 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2652 taskENTER_CRITICAL();
2654 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2663 taskEXIT_CRITICAL();
2667 /*-----------------------------------------------------------*/
2669 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2672 Queue_t * const pxQueue = xQueue;
2674 traceENTER_xQueueIsQueueFullFromISR( xQueue );
2676 configASSERT( pxQueue );
2678 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2687 traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2690 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2691 /*-----------------------------------------------------------*/
2693 #if ( configUSE_CO_ROUTINES == 1 )
2695 BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2696 const void * pvItemToQueue,
2697 TickType_t xTicksToWait )
2700 Queue_t * const pxQueue = xQueue;
2702 traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2704 /* If the queue is already full we may have to block. A critical section
2705 * is required to prevent an interrupt removing something from the queue
2706 * between the check to see if the queue is full and blocking on the queue. */
2707 portDISABLE_INTERRUPTS();
2709 if( prvIsQueueFull( pxQueue ) != pdFALSE )
2711 /* The queue is full - do we want to block or just leave without posting? */
2713 if( xTicksToWait > ( TickType_t ) 0 )
2715 /* As this is called from a co-routine we cannot block directly, but
2716 * return indicating that we need to block. */
2717 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2718 portENABLE_INTERRUPTS();
2719 return errQUEUE_BLOCKED;
2723 portENABLE_INTERRUPTS();
2724 return errQUEUE_FULL;
2728 portENABLE_INTERRUPTS();
2730 portDISABLE_INTERRUPTS();
2732 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2734 /* There is room in the queue, copy the data into the queue. */
2735 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2738 /* Were any co-routines waiting for data to become available? */
2739 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2741 /* In this instance the co-routine could be placed directly
2742 * into the ready list as we are within a critical section.
2743 * Instead the same pending ready list mechanism is used as if
2744 * the event were caused from within an interrupt. */
2745 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2747 /* The co-routine waiting has a higher priority so record
2748 * that a yield might be appropriate. */
2749 xReturn = errQUEUE_YIELD;
2753 mtCOVERAGE_TEST_MARKER();
2758 mtCOVERAGE_TEST_MARKER();
2763 xReturn = errQUEUE_FULL;
2766 portENABLE_INTERRUPTS();
2768 traceRETURN_xQueueCRSend( xReturn );
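/* Illustrative usage sketch (not part of the kernel source): application
 * co-routines do not call xQueueCRSend() directly, but use the
 * crQUEUE_SEND() macro from croutine.h, which yields and retries on
 * errQUEUE_BLOCKED. vExampleCoRoutine and xCoRoutineQueue are hypothetical
 * names; note that co-routine locals must be static to survive yields.
 *
 *  void vExampleCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *  {
 *      static BaseType_t xResult;
 *      static uint8_t ucValue = 0U;
 *
 *      crSTART( xHandle );
 *
 *      for( ; ; )
 *      {
 *          crQUEUE_SEND( xHandle, xCoRoutineQueue, &ucValue, pdMS_TO_TICKS( 10 ), &xResult );
 *      }
 *
 *      crEND();
 *  }
 */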
2773 #endif /* configUSE_CO_ROUTINES */
2774 /*-----------------------------------------------------------*/
2776 #if ( configUSE_CO_ROUTINES == 1 )
2778 BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2780 TickType_t xTicksToWait )
2783 Queue_t * const pxQueue = xQueue;
2785 traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2787 /* If the queue is already empty we may have to block. A critical section
2788 * is required to prevent an interrupt adding something to the queue
2789 * between the check to see if the queue is empty and blocking on the queue. */
2790 portDISABLE_INTERRUPTS();
2792 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2794 /* There are no messages in the queue, do we want to block or just
2795 * leave with nothing? */
2796 if( xTicksToWait > ( TickType_t ) 0 )
2798 /* As this is a co-routine we cannot block directly, but return
2799 * indicating that we need to block. */
2800 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2801 portENABLE_INTERRUPTS();
2802 return errQUEUE_BLOCKED;
2806 portENABLE_INTERRUPTS();
2807 return errQUEUE_FULL;
2812 mtCOVERAGE_TEST_MARKER();
2815 portENABLE_INTERRUPTS();
2817 portDISABLE_INTERRUPTS();
2819 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2821 /* Data is available from the queue. */
2822 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2824 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2826 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2830 mtCOVERAGE_TEST_MARKER();
2833 --( pxQueue->uxMessagesWaiting );
2834 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2838 /* Were any co-routines waiting for space to become available? */
2839 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2841 /* In this instance the co-routine could be placed directly
2842 * into the ready list as we are within a critical section.
2843 * Instead the same pending ready list mechanism is used as if
2844 * the event were caused from within an interrupt. */
2845 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2847 xReturn = errQUEUE_YIELD;
2851 mtCOVERAGE_TEST_MARKER();
2856 mtCOVERAGE_TEST_MARKER();
2864 portENABLE_INTERRUPTS();
2866 traceRETURN_xQueueCRReceive( xReturn );
2871 #endif /* configUSE_CO_ROUTINES */
2872 /*-----------------------------------------------------------*/
2874 #if ( configUSE_CO_ROUTINES == 1 )
2876 BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2877 const void * pvItemToQueue,
2878 BaseType_t xCoRoutinePreviouslyWoken )
2880 Queue_t * const pxQueue = xQueue;
2882 traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2884 /* Cannot block within an ISR so if there is no space on the queue then
2885 * exit without doing anything. */
2886 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2888 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2890 /* We only want to wake one co-routine per ISR, so check that a
2891 * co-routine has not already been woken. */
2892 if( xCoRoutinePreviouslyWoken == pdFALSE )
2894 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2896 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2902 mtCOVERAGE_TEST_MARKER();
2907 mtCOVERAGE_TEST_MARKER();
2912 mtCOVERAGE_TEST_MARKER();
2917 mtCOVERAGE_TEST_MARKER();
2920 traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2922 return xCoRoutinePreviouslyWoken;
2925 #endif /* configUSE_CO_ROUTINES */
2926 /*-----------------------------------------------------------*/
2928 #if ( configUSE_CO_ROUTINES == 1 )
2930 BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2932 BaseType_t * pxCoRoutineWoken )
2935 Queue_t * const pxQueue = xQueue;
2937 traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2939 /* We cannot block from an ISR, so check there is data available. If
2940 * not then just leave without doing anything. */
2941 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2943 /* Copy the data from the queue. */
2944 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2946 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2948 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2952 mtCOVERAGE_TEST_MARKER();
2955 --( pxQueue->uxMessagesWaiting );
2956 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2958 if( ( *pxCoRoutineWoken ) == pdFALSE )
2960 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2962 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2964 *pxCoRoutineWoken = pdTRUE;
2968 mtCOVERAGE_TEST_MARKER();
2973 mtCOVERAGE_TEST_MARKER();
2978 mtCOVERAGE_TEST_MARKER();
2988 traceRETURN_xQueueCRReceiveFromISR( xReturn );
2993 #endif /* configUSE_CO_ROUTINES */
2994 /*-----------------------------------------------------------*/
2996 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2998 void vQueueAddToRegistry( QueueHandle_t xQueue,
2999 const char * pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3002 QueueRegistryItem_t * pxEntryToWrite = NULL;
3004 traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3006 configASSERT( xQueue );
3008 if( pcQueueName != NULL )
3010 /* See if there is an empty space in the registry. A NULL name denotes a free slot. */
3012 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3014 /* Replace an existing entry if the queue is already in the registry. */
3015 if( xQueue == xQueueRegistry[ ux ].xHandle )
3017 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3020 /* Otherwise, store in the next empty location */
3021 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3023 pxEntryToWrite = &( xQueueRegistry[ ux ] );
3027 mtCOVERAGE_TEST_MARKER();
3032 if( pxEntryToWrite != NULL )
3034 /* Store the information on this queue. */
3035 pxEntryToWrite->pcQueueName = pcQueueName;
3036 pxEntryToWrite->xHandle = xQueue;
3038 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3041 traceRETURN_vQueueAddToRegistry();
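/* Illustrative usage sketch (not part of the kernel source): registering a
 * queue so a kernel-aware debugger can display it by name. The name string
 * is not copied, so it must remain valid - a string literal is typical.
 *
 *  vQueueAddToRegistry( xQueue, "RxQueue" );
 *  ...
 *  vQueueUnregisterQueue( xQueue );  Remove before deleting the queue.
 */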
3044 #endif /* configQUEUE_REGISTRY_SIZE */
3045 /*-----------------------------------------------------------*/
3047 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3049 const char * pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3052 const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
3054 traceENTER_pcQueueGetName( xQueue );
3056 configASSERT( xQueue );
3058 /* Note there is nothing here to protect against another task adding or
3059 * removing entries from the registry while it is being searched. */
3061 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3063 if( xQueueRegistry[ ux ].xHandle == xQueue )
3065 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3070 mtCOVERAGE_TEST_MARKER();
3074 traceRETURN_pcQueueGetName( pcReturn );
3077 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
3079 #endif /* configQUEUE_REGISTRY_SIZE */
3080 /*-----------------------------------------------------------*/
3082 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3084 void vQueueUnregisterQueue( QueueHandle_t xQueue )
3088 traceENTER_vQueueUnregisterQueue( xQueue );
3090 configASSERT( xQueue );
3092 /* See if the handle of the queue being unregistered is actually in the registry. */
3094 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3096 if( xQueueRegistry[ ux ].xHandle == xQueue )
3098 /* Set the name to NULL to show that this slot is free again. */
3099 xQueueRegistry[ ux ].pcQueueName = NULL;
3101 /* Set the handle to NULL to ensure the same queue handle cannot
3102 * appear in the registry twice if it is added, removed, then added again. */
3104 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3109 mtCOVERAGE_TEST_MARKER();
3113 traceRETURN_vQueueUnregisterQueue();
3114 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
3116 #endif /* configQUEUE_REGISTRY_SIZE */
3117 /*-----------------------------------------------------------*/
3119 #if ( configUSE_TIMERS == 1 )
3121 void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3122 TickType_t xTicksToWait,
3123 const BaseType_t xWaitIndefinitely )
3125 Queue_t * const pxQueue = xQueue;
3127 traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3129 /* This function should not be called by application code hence the
3130 * 'Restricted' in its name. It is not part of the public API. It is
3131 * designed for use by kernel code, and has special calling requirements.
3132 * It can result in vListInsert() being called on a list that can only
3133 * possibly ever have one item in it, so the list will be fast, but even
3134 * so it should be called with the scheduler locked and not from a critical section. */
3137 /* Only do anything if there are no messages in the queue. This function
3138 * will not actually cause the task to block, just place it on a blocked
3139 * list. It will not block until the scheduler is unlocked - at which
3140 * time a yield will be performed. If an item is added to the queue while
3141 * the queue is locked, and the calling task blocks on the queue, then the
3142 * calling task will be immediately unblocked when the queue is unlocked. */
3143 prvLockQueue( pxQueue );
3145 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3147 /* There is nothing in the queue, block for the specified period. */
3148 vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3152 mtCOVERAGE_TEST_MARKER();
3155 prvUnlockQueue( pxQueue );
3157 traceRETURN_vQueueWaitForMessageRestricted();
3160 #endif /* configUSE_TIMERS */
3161 /*-----------------------------------------------------------*/
3163 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3165 QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3167 QueueSetHandle_t pxQueue;
3169 traceENTER_xQueueCreateSet( uxEventQueueLength );
3171 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3173 traceRETURN_xQueueCreateSet( pxQueue );
3178 #endif /* configUSE_QUEUE_SETS */
3179 /*-----------------------------------------------------------*/
3181 #if ( configUSE_QUEUE_SETS == 1 )
3183 BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3184 QueueSetHandle_t xQueueSet )
3188 traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3190 taskENTER_CRITICAL();
3192 if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3194 /* Cannot add a queue/semaphore to more than one queue set. */
3197 else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3199 /* Cannot add a queue/semaphore to a queue set if there are already
3200 * items in the queue/semaphore. */
3205 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3209 taskEXIT_CRITICAL();
3211 traceRETURN_xQueueAddToSet( xReturn );
3216 #endif /* configUSE_QUEUE_SETS */
3217 /*-----------------------------------------------------------*/
3219 #if ( configUSE_QUEUE_SETS == 1 )
3221 BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3222 QueueSetHandle_t xQueueSet )
3225 Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3227 traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3229 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3231 /* The queue was not a member of the set. */
3234 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3236 /* It is dangerous to remove a queue from a set when the queue is
3237 * not empty because the queue set will still hold pending events for the queue. */
3243 taskENTER_CRITICAL();
3245 /* The queue is no longer contained in the set. */
3246 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3248 taskEXIT_CRITICAL();
3252 traceRETURN_xQueueRemoveFromSet( xReturn );
3255 } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
3257 #endif /* configUSE_QUEUE_SETS */
3258 /*-----------------------------------------------------------*/
3260 #if ( configUSE_QUEUE_SETS == 1 )
3262 QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3263 TickType_t const xTicksToWait )
3265 QueueSetMemberHandle_t xReturn = NULL;
3267 traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3269 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
3271 traceRETURN_xQueueSelectFromSet( xReturn );
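/* Illustrative usage sketch (not part of the kernel source): blocking on
 * either of two event sources via a set. The set length must be the sum of
 * the member lengths. The handles are hypothetical; assumes
 * configUSE_QUEUE_SETS is 1.
 *
 *  QueueSetHandle_t xSet = xQueueCreateSet( 10 + 1 );
 *  QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *  SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary();
 *  QueueSetMemberHandle_t xActivated;
 *  uint32_t ulValue;
 *
 *  ( void ) xQueueAddToSet( xQueue, xSet );
 *  ( void ) xQueueAddToSet( xSemaphore, xSet );
 *
 *  xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *
 *  if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
 *  {
 *      ( void ) xQueueReceive( xQueue, &ulValue, 0 );  Will not block.
 *  }
 *  else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
 *  {
 *      ( void ) xSemaphoreTake( xSemaphore, 0 );
 *  }
 */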
3276 #endif /* configUSE_QUEUE_SETS */
3277 /*-----------------------------------------------------------*/
3279 #if ( configUSE_QUEUE_SETS == 1 )
3281 QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3283 QueueSetMemberHandle_t xReturn = NULL;
3285 traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3287 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
3289 traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3294 #endif /* configUSE_QUEUE_SETS */
3295 /*-----------------------------------------------------------*/
3297 #if ( configUSE_QUEUE_SETS == 1 )
3299 static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3301 Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3302 BaseType_t xReturn = pdFALSE;
3304 /* This function must be called from a critical section. */
3306 /* The following line is not reachable in unit tests because every call
3307 * to prvNotifyQueueSetContainer is preceded by a check that
3308 * pxQueueSetContainer != NULL */
3309 configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3310 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3312 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3314 const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3316 traceQUEUE_SET_SEND( pxQueueSetContainer );
3318 /* The data copied is the handle of the queue that contains data. */
3319 xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3321 if( cTxLock == queueUNLOCKED )
3323 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3325 if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3327 /* The task waiting has a higher priority. */
3332 mtCOVERAGE_TEST_MARKER();
3337 mtCOVERAGE_TEST_MARKER();
3342 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3347 mtCOVERAGE_TEST_MARKER();
3353 #endif /* configUSE_QUEUE_SETS */