/*
 * FreeRTOS Kernel V10.3.1
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above, but not in this file, in order to generate the
correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED                    ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( int8_t ) 0 )
#define queueINT8_MAX                    ( ( int8_t ) 127 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
structure instead holds a pointer to the mutex holder (if any).  Map alternative
names to the pcHead structure member to ensure the readability of the code
is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
a union as their usage is mutually exclusive dependent on what the queue is
being used for. */
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL
typedef struct QueuePointers
{
    int8_t *pcTail;                  /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t *pcReadFrom;              /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;       /*< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )
#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;     /*< Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock;        /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock;        /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to the
    new QueueRegistryItem_t name below to enable the use of older kernel aware
    debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * to be a mutex.
 */
#if( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if( configUSE_MUTEXES == 1 )
    /*
     * If a task waiting for a mutex causes the mutex holder to inherit a
     * priority, but the waiting task times out, then the holder should
     * disinherit the priority - but only down to the highest priority of any
     * other tasks that are waiting for the same mutex.  This function returns
     * that priority.
     */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
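
/*
 * Illustration (a minimal sketch, not part of the kernel): the blocking API
 * functions later in this file pair vTaskSuspendAll()/prvLockQueue() on entry
 * with prvUnlockQueue()/xTaskResumeAll() on exit, so ISRs can still post to
 * the queue while the task decides whether to block.  The pattern looks like:
 *
 *    vTaskSuspendAll();
 *    prvLockQueue( pxQueue );
 *    if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
 *    {
 *        // Time remains - place the task on the event list if required.
 *    }
 *    prvUnlockQueue( pxQueue );
 *    ( void ) xTaskResumeAll();
 */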

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
        pxQueue->cRxLock = queueUNLOCKED;
        pxQueue->cTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/
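
/*
 * Example usage (a sketch, not part of the kernel): application code resets a
 * queue through the xQueueReset() macro from queue.h, which calls
 * xQueueGenericReset() with xNewQueue set to pdFALSE.  The handle name is
 * hypothetical.
 *
 *    // Discard anything left in the queue; one blocked sender is unblocked.
 *    ( void ) xQueueReset( xTelemetryQueue );
 */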

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* The StaticQueue_t structure and the queue storage area must be
        static so they don't disappear. */
        configASSERT( pxStaticQueue != NULL );

        /* A queue storage area should be provided if the item size is not 0, and
        should not be provided if the item size is 0. */
        configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
        configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticQueue_t or StaticSemaphore_t equals the size of
            the real queue and semaphore structures. */
            volatile size_t xSize = sizeof( StaticQueue_t );
            configASSERT( xSize == sizeof( Queue_t ) );
            ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
        }
        #endif /* configASSERT_DEFINED */

        /* The address of a statically allocated queue was passed in, use it.
        The address of a statically allocated storage area was also passed in
        but is already set. */
        pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

        if( pxNewQueue != NULL )
        {
            #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Queues can be allocated either statically or dynamically, so
                note this queue was allocated statically in case the queue is
                later deleted. */
                pxNewQueue->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
            mtCOVERAGE_TEST_MARKER();
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
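
/*
 * Example usage (a sketch, not part of the kernel): creating a queue from
 * statically allocated memory via the xQueueCreateStatic() macro from queue.h.
 * The buffer and handle names are hypothetical.
 *
 *    #define QUEUE_LENGTH    10
 *    #define ITEM_SIZE       sizeof( uint32_t )
 *
 *    static StaticQueue_t xStaticQueue;
 *    static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
 *
 *    QueueHandle_t xQueue = xQueueCreateStatic( QUEUE_LENGTH,
 *                                               ITEM_SIZE,
 *                                               ucQueueStorage,
 *                                               &xStaticQueue );
 *    configASSERT( xQueue != NULL ); // Cannot fail while pxStaticQueue is not NULL.
 */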

#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

        configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

        /* Allocate enough space to hold the maximum number of items that
        can be in the queue at any time.  It is valid for uxItemSize to be
        zero in the case the queue is used as a semaphore. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

        /* Check for multiplication overflow. */
        configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );

        /* Allocate the queue and storage area.  Justification for MISRA
        deviation as follows:  pvPortMalloc() always ensures returned memory
        blocks are aligned per the requirements of the MCU stack.  In this case
        pvPortMalloc() must return a pointer that is guaranteed to meet the
        alignment requirements of the Queue_t structure - which in this case
        is an int8_t *.  Therefore, whenever the stack alignment requirements
        are greater than or equal to the pointer to char requirements the cast
        is safe.  In other cases alignment requirements are not strict (one or
        two bytes). */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

        if( pxNewQueue != NULL )
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pucQueueStorage = ( uint8_t * ) pxNewQueue;
            pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

            #if( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Queues can be created either statically or dynamically, so
                note this queue was created dynamically in case it is later
                deleted. */
                pxNewQueue->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
        }
        else
        {
            traceQUEUE_CREATE_FAILED( ucQueueType );
            mtCOVERAGE_TEST_MARKER();
        }

        return pxNewQueue;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
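
/*
 * Example usage (a sketch, not part of the kernel): creating a queue from the
 * FreeRTOS heap via the xQueueCreate() macro from queue.h, which maps onto
 * xQueueGenericCreate().  The type and handle names are hypothetical.
 *
 *    typedef struct { uint8_t ucId; uint32_t ulValue; } Message_t;
 *
 *    QueueHandle_t xMsgQueue = xQueueCreate( 16, sizeof( Message_t ) );
 *    if( xMsgQueue == NULL )
 *    {
 *        // Not enough heap for the Queue_t header plus 16 items.
 *    }
 */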

static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* No RAM was allocated for the queue storage area, but pcHead cannot
        be set to NULL because NULL is used as a key to say the queue is used as
        a mutex.  Therefore just set pcHead to point to the queue as a benign
        value that is known to be within the memory map. */
        pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
    }
    else
    {
        /* Set the head to the start of the queue storage area. */
        pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
    }

    /* Initialise the queue members as described where the queue type is
    defined. */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

    #if ( configUSE_TRACE_FACILITY == 1 )
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if( configUSE_QUEUE_SETS == 1 )
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

    static void prvInitialiseMutex( Queue_t *pxNewQueue )
    {
        if( pxNewQueue != NULL )
        {
            /* The queue create function will set all the queue structure members
            correctly for a generic queue, but this function is creating a
            mutex.  Overwrite those members that need to be set differently -
            in particular the information required for priority inheritance. */
            pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* In case this is a recursive mutex. */
            pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
        prvInitialiseMutex( ( Queue_t * ) xNewQueue );

        return xNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
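
/*
 * Example usage (a sketch, not part of the kernel): application code creates
 * mutexes through the semphr.h wrappers rather than calling these functions
 * directly.  The handle name is hypothetical.
 *
 *    #include "semphr.h"
 *
 *    SemaphoreHandle_t xSpiMutex = xSemaphoreCreateMutex();
 *    if( xSemaphoreTake( xSpiMutex, pdMS_TO_TICKS( 100 ) ) == pdTRUE )
 *    {
 *        // Access the resource protected by the mutex, then release it.
 *        ( void ) xSemaphoreGive( xSpiMutex );
 *    }
 */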

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    TaskHandle_t pxReturn;
    Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
    TaskHandle_t pxReturn;

        configASSERT( xSemaphore );

        /* Mutexes cannot be used in interrupt service routines, so the mutex
        holder should not change in an ISR, and therefore a critical section is
        not required here. */
        if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
        }
        else
        {
            pxReturn = NULL;
        }

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then xMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the tasks handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* Has the recursive call count unwound to 0? */
            if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
        {
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn != pdFAIL )
            {
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
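
/*
 * Example usage (a sketch, not part of the kernel): recursive mutexes are
 * used via the semphr.h wrappers; every successful take must be balanced by
 * a give before the mutex is actually released.  Names are hypothetical.
 *
 *    SemaphoreHandle_t xLogMutex = xSemaphoreCreateRecursiveMutex();
 *
 *    if( xSemaphoreTakeRecursive( xLogMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *    {
 *        // Safe to call functions that also take xLogMutex recursively.
 *        ( void ) xSemaphoreGiveRecursive( xLogMutex );
 *    }
 */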

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        return xHandle;
    }

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
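
/*
 * Example usage (a sketch, not part of the kernel): a counting semaphore
 * tracking available slots in a hypothetical resource pool.
 *
 *    #include "semphr.h"
 *
 *    // Five resources, all available at start.
 *    SemaphoreHandle_t xPoolSem = xSemaphoreCreateCounting( 5, 5 );
 *
 *    if( xSemaphoreTake( xPoolSem, portMAX_DELAY ) == pdTRUE )
 *    {
 *        // One slot claimed; release it when done.
 *        ( void ) xSemaphoreGive( xPoolSem );
 *    }
 */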

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
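
/*
 * Example usage (a sketch, not part of the kernel): a hypothetical producer
 * task posting to the queue from the earlier example via the xQueueSend()
 * macro from queue.h, which maps onto xQueueGenericSend() with
 * queueSEND_TO_BACK.
 *
 *    void vProducerTask( void *pvParameters )
 *    {
 *    Message_t xMessage = { 0 };
 *
 *        for( ;; )
 *        {
 *            xMessage.ulValue++;
 *
 *            // Block for up to 10ms if the queue is full.
 *            if( xQueueSend( xMsgQueue, &xMessage, pdMS_TO_TICKS( 10 ) ) != pdPASS )
 *            {
 *                // errQUEUE_FULL - the item was not posted.
 *            }
 *        }
 *    }
 */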

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                        {
                            /* Do not notify the queue set as an existing item
                            was overwritten in the queue so the number of items
                            in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Not used in this path. */
                    ( void ) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                configASSERT( cTxLock != queueINT8_MAX );
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
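
/*
 * Example usage (a sketch, not part of the kernel): posting from a
 * hypothetical ISR and requesting a context switch on exit if a higher
 * priority task was woken.
 *
 *    void vUartRxISR( void )
 *    {
 *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *    char cRxByte = ucReadRxRegister(); // Hypothetical hardware access.
 *
 *        ( void ) xQueueSendFromISR( xRxQueue, &cRxByte, &xHigherPriorityTaskWoken );
 *
 *        // Request a context switch before the ISR exits if one is needed.
 *        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *    }
 */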

BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupt, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                configASSERT( cTxLock != queueINT8_MAX );
                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
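
/*
 * Example usage (a sketch, not part of the kernel): giving a semaphore from a
 * hypothetical ISR via the xSemaphoreGiveFromISR() wrapper in semphr.h, which
 * calls xQueueGiveFromISR().
 *
 *    void vDmaCompleteISR( void )
 *    {
 *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *        ( void ) xSemaphoreGiveFromISR( xDmaDoneSem, &xHigherPriorityTaskWoken );
 *        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *    }
 */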

BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

                /* There is now space in the queue, were any tasks waiting to
                post to the queue?  If so, unblock the highest priority waiting
                task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
            the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
            back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
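
/*
 * Example usage (a sketch, not part of the kernel): a hypothetical consumer
 * task draining the queue used in the earlier send example.
 *
 *    void vConsumerTask( void *pvParameters )
 *    {
 *    Message_t xMessage;
 *
 *        for( ;; )
 *        {
 *            // Block indefinitely until an item arrives.
 *            if( xQueueReceive( xMsgQueue, &xMessage, portMAX_DELAY ) == pdPASS )
 *            {
 *                // xMessage now holds a copy of the item removed from the queue.
 *            }
 *        }
 *    }
 */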

BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

#if( configUSE_MUTEXES == 1 )
    BaseType_t xInheritanceOccurred = pdFALSE;
#endif

    /* Check the queue pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* Check this really is a semaphore, in which case the item size will be
    0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Semaphores are queues with an item size of 0, and where the
            number of messages in the queue is the semaphore's count value. */
            const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxSemaphoreCount > ( UBaseType_t ) 0 )
            {
                traceQUEUE_RECEIVE( pxQueue );

                /* Semaphores are queues with a data size of zero and where the
                messages waiting is the semaphore's count.  Reduce the count. */
                pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        /* Record the information required to implement
                        priority inheritance should it become necessary. */
                        pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_MUTEXES */

                /* Check to see if other tasks are blocked waiting to give the
                semaphore, and if so, unblock the highest priority such task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* For inheritance to have occurred there must have been an
                    initial timeout, and an adjusted timeout cannot become 0, as
                    if it were 0 the function would have exited. */
                    #if( configUSE_MUTEXES == 1 )
                    {
                        configASSERT( xInheritanceOccurred == pdFALSE );
                    }
                    #endif /* configUSE_MUTEXES */

                    /* The semaphore count was 0 and no block time is specified
                    (or the block time has expired) so exit now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The semaphore count was 0 and a block time was specified
                    so configure the timeout structure ready to block. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can give to and take from the semaphore
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* A block time is specified and not expired.  If the semaphore
            count is 0 then enter the Blocked state to wait for a semaphore to
            become available.  As semaphores are implemented with queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                attempt to take the semaphore again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            /* If the semaphore count is 0 exit now as the timeout has
            expired.  Otherwise return to attempt to take the semaphore that is
            known to be available.  As semaphores are implemented by queues the
            queue being empty is equivalent to the semaphore count being 0. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                #if ( configUSE_MUTEXES == 1 )
                {
                    /* xInheritanceOccurred could only have been set if
                    pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
                    test the mutex type again to check it is actually a mutex. */
                    if( xInheritanceOccurred != pdFALSE )
                    {
                        taskENTER_CRITICAL();
                        {
                            UBaseType_t uxHighestWaitingPriority;

                            /* This task blocking on the mutex caused another
                            task to inherit this task's priority.  Now this task
                            has timed out the priority should be disinherited
                            again, but only as low as the next highest priority
                            task that is waiting for the same mutex. */
                            uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                            vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                        }
                        taskEXIT_CRITICAL();
                    }
                }
                #endif /* configUSE_MUTEXES */

                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
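
/*
 * Example usage (a sketch, not part of the kernel): taking a binary or
 * counting semaphore with a bounded wait via the xSemaphoreTake() wrapper in
 * semphr.h, which calls xQueueSemaphoreTake().  The handle name is
 * hypothetical.
 *
 *    if( xSemaphoreTake( xDmaDoneSem, pdMS_TO_TICKS( 500 ) ) == pdTRUE )
 *    {
 *        // The ISR gave the semaphore within 500ms.
 *    }
 *    else
 *    {
 *        // The wait timed out (errQUEUE_EMPTY path).
 *    }
 */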

BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
    is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904 This function relaxes the coding standard somewhat to
    allow return statements within the function itself.  This is done in the
    interest of execution time efficiency. */
    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position so it can be reset after the data
                is read from the queue as this function is only peeking the
                data, not removing it. */
                pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_PEEK( pxQueue );

                /* The data is not being removed, so reset the read pointer. */
                pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

                /* The data is being left in the queue, so see if there are
                any other tasks waiting for the data. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than this task. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();

                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_PEEK_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure ready to enter the blocked
                    state. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* Timeout has not expired yet, check to see if there is data in the
            queue now, and if not enter the Blocked state to wait for data. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* There is data in the queue now, so don't enter the blocked
                state, instead return to try and obtain the data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired.  If there is still no data in the queue
            exit, otherwise go back and try to read the data again. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_PEEK_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
/*-----------------------------------------------------------*/
\r
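/* Usage sketch (illustrative only, not part of the kernel): peeking leaves
the item on the queue, so a subsequent receive returns the same item -
assuming no other task or interrupt reads the queue in between.  The queue
handle, item type and timeout are placeholders for the example. */
#if 0
static void vExamplePeekThenReceive( QueueHandle_t xQueue )
{
uint32_t ulPeeked, ulReceived;

    /* Read the next item without removing it. */
    if( xQueuePeek( xQueue, &ulPeeked, pdMS_TO_TICKS( 10 ) ) == pdPASS )
    {
        /* Provided no other reader ran in the meantime, the same item is
        still at the head of the queue, so this receive obtains the value
        just peeked. */
        if( xQueueReceive( xQueue, &ulReceived, 0 ) == pdPASS )
        {
            configASSERT( ulReceived == ulPeeked );
        }
    }
}
#endif
/*-----------------------------------------------------------*/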
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if( uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( cRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                configASSERT( cRxLock != queueINT8_MAX );
                pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/
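/* Usage sketch (illustrative only, not part of the kernel): draining a
queue from an interrupt handler.  The handler name, queue handle and item
type are placeholders, and portYIELD_FROM_ISR() is assumed to be provided
by the port in use. */
#if 0
void vExampleInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulReceived;

    /* Receive until the queue is empty.  xHigherPriorityTaskWoken is set to
    pdTRUE if receiving unblocks a task with a priority above the task that
    was interrupted. */
    while( xQueueReceiveFromISR( xExampleQueue, &ulReceived, &xHigherPriorityTaskWoken ) == pdPASS )
    {
        /* Process ulReceived here. */
    }

    /* Request a context switch on exit if one became necessary. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif
/*-----------------------------------------------------------*/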
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/
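/* Usage sketch (illustrative only, not part of the kernel): for a queue of
length L, uxQueueMessagesWaiting() and uxQueueSpacesAvailable() sum to L at
the instant each is sampled, but the two calls below are not atomic as a
pair.  The handle and length parameter are placeholders. */
#if 0
static void vExampleQueryQueue( QueueHandle_t xQueue, UBaseType_t uxQueueLength )
{
UBaseType_t uxWaiting, uxSpaces;

    uxWaiting = uxQueueMessagesWaiting( xQueue );
    uxSpaces = uxQueueSpacesAvailable( xQueue );

    /* Only guaranteed to hold if no other task or interrupt used the queue
    between the two calls above. */
    configASSERT( ( uxWaiting + uxSpaces ) == uxQueueLength );
}
#endif
/*-----------------------------------------------------------*/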
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    uxReturn = pxQueue->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    traceQUEUE_DELETE( pxQueue );

    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif

    #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
    {
        /* The queue can only have been allocated dynamically - free it
        again. */
        vPortFree( pxQueue );
    }
    #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    {
        /* The queue could have been allocated statically or dynamically, so
        check before attempting to free the memory. */
        if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
        {
            vPortFree( pxQueue );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #else
    {
        /* The queue must have been statically allocated, so is not going to be
        deleted.  Avoid compiler warnings about the unused parameter. */
        ( void ) pxQueue;
    }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
/*-----------------------------------------------------------*/
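/* Usage sketch (illustrative only, not part of the kernel): deleting a
dynamically allocated queue once it is no longer accessed.  Nothing in
vQueueDelete() prevents other tasks or interrupts from using the handle
after deletion, so the application must guarantee the queue is idle first.
The queue length and item type are placeholders. */
#if 0
static void vExampleCreateUseDeleteQueue( void )
{
QueueHandle_t xQueue;

    xQueue = xQueueCreate( 8, sizeof( uint32_t ) );

    if( xQueue != NULL )
    {
        /* ... use the queue, then ensure no task or ISR still references
        the handle before deleting it ... */
        vQueueDelete( xQueue );
    }
}
#endif
/*-----------------------------------------------------------*/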
#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
    {
    UBaseType_t uxHighestPriorityOfWaitingTasks;

        /* If a task waiting for a mutex causes the mutex holder to inherit a
        priority, but the waiting task times out, then the holder should
        disinherit the priority - but only down to the highest priority of any
        other tasks that are waiting for the same mutex.  For this purpose,
        return the priority of the highest priority task that is waiting for
        the mutex. */
        if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
        {
            uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
        }
        else
        {
            uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
        }

        return uxHighestPriorityOfWaitingTasks;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */

    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
    {
        #if ( configUSE_MUTEXES == 1 )
        {
            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                /* The mutex is no longer being held. */
                xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
                pxQueue->u.xSemaphore.xMutexHolder = NULL;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_MUTEXES */
    }
    else if( xPosition == queueSEND_TO_BACK )
    {
        ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
        pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
        if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */
        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
        if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
        {
            pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( xPosition == queueOVERWRITE )
        {
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* An item is not being added but overwritten, so subtract
                one from the recorded number of items in the queue so when
                one is added again below the number of recorded items remains
                correct. */
                --uxMessagesWaiting;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

    return xReturn;
}
/*-----------------------------------------------------------*/
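/* Usage sketch (illustrative only, not part of the kernel): the
queueOVERWRITE path above is what makes xQueueOverwrite() usable as a
single-item "mailbox" that always holds the latest value.  The handle and
values are placeholders for the example. */
#if 0
static void vExampleMailbox( void )
{
QueueHandle_t xMailbox;
uint32_t ulValue;

    /* xQueueOverwrite() may only be used with queues that have a length of
    one. */
    xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );

    if( xMailbox != NULL )
    {
        ulValue = 10;
        ( void ) xQueueOverwrite( xMailbox, &ulValue );

        /* The mailbox is now full, but overwriting cannot fail - the stored
        value is replaced and the recorded item count stays at one. */
        ulValue = 20;
        ( void ) xQueueOverwrite( xMailbox, &ulValue );

        /* ulValue receives 20, the most recently written value. */
        ( void ) xQueueReceive( xMailbox, &ulValue, 0 );
    }
}
#endif
/*-----------------------------------------------------------*/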
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
    {
        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
        if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
        {
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
    }
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

    /* The lock counts contain the number of extra data items placed or
    removed from the queue while the queue was locked.  When a queue is
    locked items can be added or removed, but the event lists cannot be
    updated. */
    taskENTER_CRITICAL();
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while( cTxLock > queueLOCKED_UNMODIFIED )
        {
            /* Data was posted while the queue was locked.  Are any tasks
            blocked waiting for data to become available? */
            #if ( configUSE_QUEUE_SETS == 1 )
            {
                if( pxQueue->pxQueueSetContainer != NULL )
                {
                    if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                    {
                        /* The queue is a member of a queue set, and posting to
                        the queue set caused a higher priority task to unblock.
                        A context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    /* Tasks that are removed from the event list will get
                    added to the pending ready list as the scheduler is still
                    suspended. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            vTaskMissedYield();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            #else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                the pending ready list as the scheduler is still suspended. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority so record that
                        a context switch is required. */
                        vTaskMissedYield();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    break;
                }
            }
            #endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while( cRxLock > queueLOCKED_UNMODIFIED )
        {
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                {
                    vTaskMissedYield();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

    taskENTER_CRITICAL();
    {
        if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
        {
            xReturn = pdTRUE;
        }
        else
        {
            xReturn = pdFALSE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;
Queue_t * const pxQueue = xQueue;

    configASSERT( pxQueue );
    if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
    {
        xReturn = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = xQueue;

        /* If the queue is already full we may have to block.  A critical section
        is required to prevent an interrupt removing something from the queue
        between the check to see if the queue is full and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                /* The queue is full - do we want to block or just leave without
                posting? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is called from a coroutine we cannot block directly, but
                    return indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
            {
                /* There is room in the queue, copy the data into the queue. */
                prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
                xReturn = pdPASS;

                /* Were any co-routines waiting for data to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The co-routine waiting has a higher priority so record
                        that a yield might be appropriate. */
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = errQUEUE_FULL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
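/* Usage sketch (illustrative only, not part of the kernel): application
co-routines do not call xQueueCRSend() directly - the crQUEUE_SEND() macro
from croutine.h wraps it and re-schedules the co-routine when
errQUEUE_BLOCKED is returned.  The queue handle is a placeholder, and the
local variables are static because co-routines do not keep a private stack
across blocking calls. */
#if 0
static void prvExampleSenderCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
static BaseType_t xResult;
static uint32_t ulValueToSend = 0;

    ( void ) uxIndex;

    crSTART( xHandle );

    for( ;; )
    {
        /* Block for up to 10 ticks for space to become available on the
        queue, then check the result of the send. */
        crQUEUE_SEND( xHandle, xExampleCoRoutineQueue, &ulValueToSend, 10, &xResult );

        if( xResult == pdPASS )
        {
            ulValueToSend++;
        }
    }

    crEND();
}
#endif
/*-----------------------------------------------------------*/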
#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = xQueue;

        /* If the queue is already empty we may have to block.  A critical section
        is required to prevent an interrupt adding something to the queue
        between the check to see if the queue is empty and blocking on the queue. */
        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
            {
                /* There are no messages in the queue, do we want to block or just
                leave with nothing? */
                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    /* As this is a co-routine we cannot block directly, but return
                    indicating that we need to block. */
                    vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
                    portENABLE_INTERRUPTS();
                    return errQUEUE_BLOCKED;
                }
                else
                {
                    portENABLE_INTERRUPTS();
                    return errQUEUE_FULL;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        portENABLE_INTERRUPTS();

        portDISABLE_INTERRUPTS();
        {
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data is available from the queue. */
                pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
                if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
                {
                    pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
                --( pxQueue->uxMessagesWaiting );
                ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

                xReturn = pdPASS;

                /* Were any co-routines waiting for space to become available? */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    /* In this instance the co-routine could be placed directly
                    into the ready list as we are within a critical section.
                    Instead the same pending ready list mechanism is used as if
                    the event were caused from within an interrupt. */
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        xReturn = errQUEUE_YIELD;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        portENABLE_INTERRUPTS();

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
    {
    Queue_t * const pxQueue = xQueue;

        /* Cannot block within an ISR so if there is no space on the queue then
        exit without doing anything. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

            /* We only want to wake one co-routine per ISR, so check that a
            co-routine has not already been woken. */
            if( xCoRoutinePreviouslyWoken == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        return pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xCoRoutinePreviouslyWoken;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

    BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueue = xQueue;

        /* We cannot block from an ISR, so check there is data available.  If
        not then just leave without doing anything. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            /* Copy the data from the queue. */
            pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
            if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
            {
                pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
            --( pxQueue->uxMessagesWaiting );
            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

            if( ( *pxCoRoutineWoken ) == pdFALSE )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        *pxCoRoutineWoken = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
        }

        return xReturn;
    }

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;

        /* See if there is an empty space in the registry.  A NULL name denotes
        a free slot. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].pcQueueName == NULL )
            {
                /* Store the information on this queue. */
                xQueueRegistry[ ux ].pcQueueName = pcQueueName;
                xQueueRegistry[ ux ].xHandle = xQueue;

                traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
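/* Usage sketch (illustrative only, not part of the kernel): registering a
queue gives it a human readable name in kernel aware debuggers.  The
registry stores only the string pointer, so the name must remain valid for
as long as the entry exists - a string literal is used here.  The handle
and name are placeholders. */
#if 0
static void vExampleRegisterQueue( QueueHandle_t xQueue )
{
    /* Make the queue visible to a kernel aware debugger under this name. */
    vQueueAddToRegistry( xQueue, "TxQueue" );

    /* ... use the queue ... */

    /* Remove the registry entry again before the queue is deleted. */
    vQueueUnregisterQueue( xQueue );
}
#endif
/*-----------------------------------------------------------*/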
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
    UBaseType_t ux;
    const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

        /* Note there is nothing here to protect against another task adding or
        removing entries from the registry while it is being searched. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                pcReturn = xQueueRegistry[ ux ].pcQueueName;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        return pcReturn;
    } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

    void vQueueUnregisterQueue( QueueHandle_t xQueue )
    {
    UBaseType_t ux;

        /* See if the handle of the queue being unregistered is actually in the
        registry. */
        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
        {
            if( xQueueRegistry[ ux ].xHandle == xQueue )
            {
                /* Set the name to NULL to show that this slot is free again. */
                xQueueRegistry[ ux ].pcQueueName = NULL;

                /* Set the handle to NULL to ensure the same queue handle cannot
                appear in the registry twice if it is added, removed, then
                added again. */
                xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

    void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
    {
    Queue_t * const pxQueue = xQueue;

        /* This function should not be called by application code hence the
        'Restricted' in its name.  It is not part of the public API.  It is
        designed for use by kernel code, and has special calling requirements.
        It can result in vListInsert() being called on a list that can only
        possibly ever have one item in it, so the list will be fast, but even
        so it should be called with the scheduler locked and not from a critical
        section. */

        /* Only do anything if there are no messages in the queue.  This function
        will not actually cause the task to block, just place it on a blocked
        list.  It will not block until the scheduler is unlocked - at which
        time a yield will be performed.  If an item is added to the queue while
        the queue is locked, and the calling task blocks on the queue, then the
        calling task will be immediately unblocked when the queue is unlocked. */
        prvLockQueue( pxQueue );
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
        {
            /* There is nothing in the queue, block for the specified period. */
            vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        prvUnlockQueue( pxQueue );
    }

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
    {
    QueueSetHandle_t pxQueue;

        pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

        return pxQueue;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
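/* Usage sketch (illustrative only, not part of the kernel): a queue set's
length must be at least the sum of the lengths of its member queues plus
the maximum counts of its member semaphores, so every pending event has
space in the set.  The member queues here are assumed to each have a
length of 8; the handles are placeholders. */
#if 0
static QueueSetHandle_t xExampleCreateSet( QueueHandle_t xQueue1, QueueHandle_t xQueue2 )
{
QueueSetHandle_t xSet;

    /* Two member queues of length 8 each, so the set must be able to hold
    at least 16 events. */
    xSet = xQueueCreateSet( 16 );

    if( xSet != NULL )
    {
        /* Members must be empty at the point they are added to the set. */
        ( void ) xQueueAddToSet( xQueue1, xSet );
        ( void ) xQueueAddToSet( xQueue2, xSet );
    }

    return xSet;
}
#endif
/*-----------------------------------------------------------*/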
#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;

        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
            {
                /* Cannot add a queue/semaphore to more than one queue set. */
                xReturn = pdFAIL;
            }
            else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
            {
                /* Cannot add a queue/semaphore to a queue set if there are already
                items in the queue/semaphore. */
                xReturn = pdFAIL;
            }
            else
            {
                ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
                xReturn = pdPASS;
            }
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
    {
    BaseType_t xReturn;
    Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

        if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
        {
            /* The queue was not a member of the set. */
            xReturn = pdFAIL;
        }
        else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
        {
            /* It is dangerous to remove a queue from a set when the queue is
            not empty because the queue set will still hold pending events for
            the queue. */
            xReturn = pdFAIL;
        }
        else
        {
            taskENTER_CRITICAL();
            {
                /* The queue is no longer contained in the set. */
                pxQueueOrSemaphore->pxQueueSetContainer = NULL;
            }
            taskEXIT_CRITICAL();
            xReturn = pdPASS;
        }

        return xReturn;
    } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
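/* Usage sketch (illustrative only, not part of the kernel): a task blocking
on a queue set, then reading from whichever member queue signalled.  A zero
block time is used for the member read because the select already
guarantees an item is available on that member.  The handle names are
placeholders. */
#if 0
static void vExampleSetReaderTask( void *pvParameters )
{
QueueSetMemberHandle_t xActivatedMember;
uint32_t ulReceived;

    ( void ) pvParameters;

    for( ;; )
    {
        /* Block until one of the member queues contains data. */
        xActivatedMember = xQueueSelectFromSet( xExampleQueueSet, portMAX_DELAY );

        if( xActivatedMember == ( QueueSetMemberHandle_t ) xExampleQueue1 )
        {
            ( void ) xQueueReceive( xExampleQueue1, &ulReceived, 0 );
        }
        else if( xActivatedMember == ( QueueSetMemberHandle_t ) xExampleQueue2 )
        {
            ( void ) xQueueReceive( xExampleQueue2, &ulReceived, 0 );
        }
    }
}
#endif
/*-----------------------------------------------------------*/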
#if ( configUSE_QUEUE_SETS == 1 )

    QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
    {
    QueueSetMemberHandle_t xReturn = NULL;

        ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
    {
    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;

        /* This function must be called from a critical section. */

        configASSERT( pxQueueSetContainer );
        configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

        if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
        {
            const int8_t cTxLock = pxQueueSetContainer->cTxLock;

            traceQUEUE_SET_SEND( pxQueueSetContainer );

            /* The data copied is the handle of the queue that contains data. */
            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );

            if( cTxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority. */
                        xReturn = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                configASSERT( cTxLock != queueINT8_MAX );

                pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }

#endif /* configUSE_QUEUE_SETS */