/*
    FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

    ***************************************************************************
    >>!   NOTE: The modification to the GPL is included to allow you to     !<<
    >>!   distribute a combined work that includes FreeRTOS without being   !<<
    >>!   obliged to provide the source code for proprietary components     !<<
    >>!   outside of the FreeRTOS kernel.                                   !<<
    ***************************************************************************

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available on the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that is more than just the market leader, it     *
     *    is the industry's de facto standard.                               *
     *                                                                       *
     *    Help yourself get started quickly while simultaneously helping     *
     *    to support the FreeRTOS project by purchasing a FreeRTOS           *
     *    tutorial book, reference manual, or both:                          *
     *    http://www.FreeRTOS.org/Documentation                              *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
    the FAQ page "My application does not run, what could be wrong?".  Have you
    defined configASSERT()?

    http://www.FreeRTOS.org/support - In return for receiving this top quality
    embedded software for free we request you assist our global community by
    participating in the support forum.

    http://www.FreeRTOS.org/training - Investing in training allows your team
    to be as productive as possible as early as possible.  Now you can receive
    FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
    Ltd, and the world's leading authority on the world's leading RTOS.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
    Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

    http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
    Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and commercial middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/

/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
    #include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */

/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED                    ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED           ( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder                    pcTail
#define uxQueueType                      pcHead
#define queueQUEUE_IS_MUTEX              NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME       ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                 /*< Points to the beginning of the queue storage area. */
    int8_t *pcTail;                 /*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this is used as a marker. */
    int8_t *pcWriteTo;              /*< Points to the next free place in the storage area. */

    union                           /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
    {
        int8_t *pcReadFrom;         /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
        UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
    } u;

    List_t xTasksWaitingToSend;     /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
    List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
    UBaseType_t uxLength;           /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;         /*< The size of each item that the queue will hold. */

    volatile BaseType_t xRxLock;    /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
    volatile BaseType_t xTxLock;    /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxQueueNumber;
        uint8_t ucQueueType;
    #endif

    #if ( configUSE_QUEUE_SETS == 1 )
        struct QueueDefinition *pxQueueSetContainer;
    #endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

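/*
 * Illustrative note (added for this edit, not part of the original source):
 * how the single allocation made by xQueueGenericCreate() below maps onto
 * the members above, using an assumed example:
 *
 *    QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 * One block of sizeof( Queue_t ) + ( 4 * sizeof( uint32_t ) ) + 1 bytes is
 * obtained from pvPortMalloc().  pcHead points just past the Queue_t
 * structure and pcTail one byte past the end of the storage area - the
 * spare byte keeps the wrap-around comparisons in prvCopyDataToQueue()
 * and prvCopyDataFromQueue() simple.
 */
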
/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

    /* The type stored within the queue registry array.  This allows a name
    to be assigned to each queue making kernel aware debugging a little
    more user friendly. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

    /* The old xQueueRegistryItem name is maintained above then typedefed to
    the new xQueueRegistryItem name below to enable the use of older kernel
    aware debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

    /* The queue registry is simply an array of QueueRegistryItem_t structures.
    The pcQueueName member of a structure being NULL is indicative of the
    array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
    /*
     * Checks to see if a queue is a member of a queue set, and if so, notifies
     * the queue set that the queue contains data.
     */
    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )                            \
    taskENTER_CRITICAL();                                  \
    {                                                      \
        if( ( pxQueue )->xRxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
        if( ( pxQueue )->xTxLock == queueUNLOCKED )        \
        {                                                  \
            ( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED; \
        }                                                  \
    }                                                      \
    taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/

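/*
 * Illustrative scenario (added for this edit, not part of the original
 * source): a task calls prvLockQueue() before it suspends the scheduler and
 * blocks.  If an ISR then executes xQueueSendFromISR() it finds xTxLock is
 * not queueUNLOCKED, so it only increments xTxLock instead of touching the
 * event lists.  When the task later calls prvUnlockQueue() the non-zero lock
 * count tells it that data arrived while the queue was locked, and any task
 * waiting to receive is then unblocked safely.
 */
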
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
        pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
        pxQueue->pcWriteTo = pxQueue->pcHead;
        pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
        pxQueue->xRxLock = queueUNLOCKED;
        pxQueue->xTxLock = queueUNLOCKED;

        if( xNewQueue == pdFALSE )
        {
            /* If there are tasks blocked waiting to read from the queue, then
            the tasks will remain blocked as after this function exits the queue
            will still be empty.  If there are tasks blocked waiting to write to
            the queue, then one should be unblocked as after this function exits
            it will be possible to write to it. */
            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
            {
                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                {
                    queueYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* Ensure the event queues start in the correct state. */
            vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
        }
    }
    taskEXIT_CRITICAL();

    /* A value is returned for calling semantic consistency with previous
    versions. */
    return pdPASS;
}
/*-----------------------------------------------------------*/

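/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Application code normally reaches this function through the
 * xQueueReset() macro in queue.h, which passes pdFALSE as xNewQueue; xQueue
 * is assumed to be a handle created earlier:
 *
 *    // Discard any queued items; one task blocked on a send, if any,
 *    // is unblocked because space becomes available.
 *    ( void ) xQueueReset( xQueue );
 */
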
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

    /* Remove compiler warnings about unused parameters should
    configUSE_TRACE_FACILITY not be set to 1. */
    ( void ) ucQueueType;

    configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

    if( uxItemSize == ( UBaseType_t ) 0 )
    {
        /* There is not going to be a queue storage area. */
        xQueueSizeInBytes = ( size_t ) 0;
    }
    else
    {
        /* The queue is one byte longer than asked for to make wrap checking
        easier. */
        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }

    /* Allocate the new queue structure and storage area. */
    pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

    if( pxNewQueue != NULL )
    {
        if( uxItemSize == ( UBaseType_t ) 0 )
        {
            /* No RAM was allocated for the queue storage area, but pcHead
            cannot be set to NULL because NULL is used as a key to say the queue
            is used as a mutex.  Therefore just set pcHead to point to the queue
            as a benign value that is known to be within the memory map. */
            pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
        }
        else
        {
            /* Jump past the queue structure to find the location of the queue
            storage area. */
            pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
        }

        /* Initialise the queue members as described above where the queue type
        is defined. */
        pxNewQueue->uxLength = uxQueueLength;
        pxNewQueue->uxItemSize = uxItemSize;
        ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            pxNewQueue->ucQueueType = ucQueueType;
        }
        #endif /* configUSE_TRACE_FACILITY */

        #if( configUSE_QUEUE_SETS == 1 )
        {
            pxNewQueue->pxQueueSetContainer = NULL;
        }
        #endif /* configUSE_QUEUE_SETS */

        traceQUEUE_CREATE( pxNewQueue );
        xReturn = pxNewQueue;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    configASSERT( xReturn );

    return xReturn;
}
/*-----------------------------------------------------------*/

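/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Application code normally reaches this function through the
 * xQueueCreate() macro in queue.h:
 *
 *    QueueHandle_t xQueue;
 *
 *    // A queue capable of holding ten uint32_t values, queued by copy.
 *    xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *    if( xQueue == NULL )
 *    {
 *        // Not enough FreeRTOS heap for the Queue_t structure plus the
 *        // ( 10 * sizeof( uint32_t ) ) + 1 byte storage area.
 *    }
 */
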
#if ( configUSE_MUTEXES == 1 )

    QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
    {
    Queue_t *pxNewQueue;

        /* Prevent compiler warnings about unused parameters if
        configUSE_TRACE_FACILITY does not equal 1. */
        ( void ) ucQueueType;

        /* Allocate the new queue structure. */
        pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
        if( pxNewQueue != NULL )
        {
            /* Information required for priority inheritance. */
            pxNewQueue->pxMutexHolder = NULL;
            pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

            /* When a queue is used as a mutex no data is actually copied into
            or out of the queue. */
            pxNewQueue->pcWriteTo = NULL;
            pxNewQueue->u.pcReadFrom = NULL;

            /* Each mutex has a length of 1 (like a binary semaphore) and
            an item size of 0 as nothing is actually copied into or out
            of the mutex. */
            pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
            pxNewQueue->uxLength = ( UBaseType_t ) 1U;
            pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
            pxNewQueue->xRxLock = queueUNLOCKED;
            pxNewQueue->xTxLock = queueUNLOCKED;

            #if ( configUSE_TRACE_FACILITY == 1 )
            {
                pxNewQueue->ucQueueType = ucQueueType;
            }
            #endif

            #if ( configUSE_QUEUE_SETS == 1 )
            {
                pxNewQueue->pxQueueSetContainer = NULL;
            }
            #endif

            /* Ensure the event queues start with the correct state. */
            vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
            vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

            traceCREATE_MUTEX( pxNewQueue );

            /* Start with the semaphore in the expected state. */
            ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
        }
        else
        {
            traceCREATE_MUTEX_FAILED();
        }

        return pxNewQueue;
    }

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

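/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Application code reaches this function through the
 * xSemaphoreCreateMutex() macro in semphr.h:
 *
 *    SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *    if( xMutex != NULL )
 *    {
 *        if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdPASS )
 *        {
 *            // Access the protected resource, then:
 *            ( void ) xSemaphoreGive( xMutex );
 *        }
 *    }
 */
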
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
    void *pxReturn;

        /* This function is called by xSemaphoreGetMutexHolder(), and should not
        be called directly.  Note:  This is a good way of determining if the
        calling task is the mutex holder, but not a good way of determining the
        identity of the mutex holder, as the holder may change between the
        following critical section exiting and the function returning. */
        taskENTER_CRITICAL();
        {
            if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
            {
                pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
            }
            else
            {
                pxReturn = NULL;
            }
        }
        taskEXIT_CRITICAL();

        return pxReturn;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

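/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Called through the xSemaphoreGetMutexHolder() macro in semphr.h;
 * as noted above, the result is only reliable when a task checks whether it
 * is itself the holder:
 *
 *    if( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() )
 *    {
 *        // The calling task holds the mutex.
 *    }
 */
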
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* If this is the task that holds the mutex then pxMutexHolder will not
        change outside of this task.  If this task does not hold the mutex then
        pxMutexHolder can never coincidentally equal the task's handle, and as
        this is the only condition we are interested in it does not matter if
        pxMutexHolder is accessed simultaneously by another task.  Therefore no
        mutual exclusion is required to test the pxMutexHolder variable. */
        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
            the task handle, therefore no underflow check is required.  Also,
            uxRecursiveCallCount is only modified by the mutex holder, and as
            there can only be one, no mutual exclusion is required to modify the
            uxRecursiveCallCount member. */
            ( pxMutex->u.uxRecursiveCallCount )--;

            /* Have we unwound the call count? */
            if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                /* Return the mutex.  This will automatically unblock any other
                task that might be waiting to access the mutex. */
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            xReturn = pdPASS;
        }
        else
        {
            /* The mutex cannot be given because the calling task is not the
            holder. */
            xReturn = pdFAIL;

            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
    {
    BaseType_t xReturn;
    Queue_t * const pxMutex = ( Queue_t * ) xMutex;

        configASSERT( pxMutex );

        /* Comments regarding mutual exclusion as per those within
        xQueueGiveMutexRecursive(). */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
        {
            ( pxMutex->u.uxRecursiveCallCount )++;
            xReturn = pdPASS;
        }
        else
        {
            xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

            /* pdPASS will only be returned if the mutex was successfully
            obtained.  The calling task may have entered the Blocked state
            before reaching here. */
            if( xReturn == pdPASS )
            {
                ( pxMutex->u.uxRecursiveCallCount )++;
            }
            else
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
        }

        return xReturn;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

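/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  The recursive take/give pair above is reached through the
 * semphr.h macros; a mutex taken N times must be given N times before it is
 * actually released:
 *
 *    SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *    if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
 *    {
 *        // May be taken again by the same task without blocking...
 *        ( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );
 *
 *        // ...provided each take is balanced by a give.
 *        ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *        ( void ) xSemaphoreGiveRecursive( xRecMutex );
 *    }
 */
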
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

    QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
    {
    QueueHandle_t xHandle;

        configASSERT( uxMaxCount != 0 );
        configASSERT( uxInitialCount <= uxMaxCount );

        xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

        if( xHandle != NULL )
        {
            ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

            traceCREATE_COUNTING_SEMAPHORE();
        }
        else
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }

        configASSERT( xHandle );
        return xHandle;
    }

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/

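/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Reached through the xSemaphoreCreateCounting() macro in semphr.h;
 * the count lives in uxMessagesWaiting, so no storage area is allocated:
 *
 *    // A semaphore that can count to 5, starting empty.
 *    SemaphoreHandle_t xSem = xSemaphoreCreateCounting( 5, 0 );
 */
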
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there room on the queue now?  The running task must be the
            highest priority task wanting to access the queue.  If the head item
            in the queue is to be overwritten then it does not matter if the
            queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );
                xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* If there was a task waiting for data to arrive on the
                        queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                            {
                                /* The unblocked task has a priority higher than
                                our own so yield immediately.  Yes it is ok to
                                do this from within the critical section - the
                                kernel takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                            executed if the task was holding multiple mutexes
                            and the mutexes were given back in an order that is
                            different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately.  Yes it is ok to do
                            this from within the critical section - the kernel
                            takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else if( xYieldRequired != pdFALSE )
                    {
                        /* This path is a special case that will only get
                        executed if the task was holding multiple mutexes and
                        the mutexes were given back in an order that is
                        different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was full and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                    the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                event list.  It is possible that interrupts occurring now
                remove this task from the event list again - but as the
                scheduler is suspended the task will go onto the pending
                ready list instead of the actual ready list. */
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                ready list into the ready list - so it is feasible that this
                task is already in a ready list before it yields - in which
                case the yield will not cause a context switch unless there
                is also a higher priority task in the pending ready list. */
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    }
}
/*-----------------------------------------------------------*/

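/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Application code reaches this function through the queue.h
 * macros xQueueSend(), xQueueSendToBack(), xQueueSendToFront() and
 * xQueueOverwrite(); xQueue is assumed to be a handle created earlier:
 *
 *    uint32_t ulValue = 10UL;
 *
 *    // Block for up to 100 ticks if the queue is already full.
 *    if( xQueueSendToBack( xQueue, &ulValue, ( TickType_t ) 100 ) != pdPASS )
 *    {
 *        // errQUEUE_FULL - the item could not be posted before the
 *        // block time expired.
 *    }
 */
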
#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                /* Is there room on the queue now?  To be running we must be
                the highest priority task wanting to access the queue. */
                if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
                {
                    traceQUEUE_SEND( pxQueue );
                    prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                    /* If there was a task waiting for data to arrive on the
                    queue then unblock it now. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
                        {
                            /* The unblocked task has a priority higher than
                            our own so yield immediately. */
                            portYIELD_WITHIN_API();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        return errQUEUE_FULL;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueFull( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

    BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
    {
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t *pcOriginalReadPosition;
    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

        for( ;; )
        {
            taskENTER_CRITICAL();
            {
                if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
                {
                    /* Remember our read position in case we are just peeking. */
                    pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                    prvCopyDataFromQueue( pxQueue, pvBuffer );

                    if( xJustPeeking == pdFALSE )
                    {
                        traceQUEUE_RECEIVE( pxQueue );

                        /* Data is actually being removed (not just peeked). */
                        --( pxQueue->uxMessagesWaiting );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                /* Record the information required to implement
                                priority inheritance should it become necessary. */
                                pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                            {
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                    else
                    {
                        traceQUEUE_PEEK( pxQueue );

                        /* The data is not being removed, so reset our read
                        pointer. */
                        pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                        /* The data is being left in the queue, so see if there are
                        any other tasks waiting for the data. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            /* Tasks that are removed from the event list will get added to
                            the pending ready list as the scheduler is still suspended. */
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority than this task. */
                                portYIELD_WITHIN_API();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }

                    taskEXIT_CRITICAL();
                    return pdPASS;
                }
                else
                {
                    if( xTicksToWait == ( TickType_t ) 0 )
                    {
                        taskEXIT_CRITICAL();
                        traceQUEUE_RECEIVE_FAILED( pxQueue );
                        return errQUEUE_EMPTY;
                    }
                    else if( xEntryTimeSet == pdFALSE )
                    {
                        vTaskSetTimeOutState( &xTimeOut );
                        xEntryTimeSet = pdTRUE;
                    }
                }
            }
            taskEXIT_CRITICAL();

            taskENTER_CRITICAL();
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
                    {
                        traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                        #if ( configUSE_MUTEXES == 1 )
                        {
                            if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                            {
                                taskENTER_CRITICAL();
                                {
                                    vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                                }
                                taskEXIT_CRITICAL();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        #endif

                        vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                        portYIELD_WITHIN_API();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
            }
            taskEXIT_CRITICAL();
        }
    }

#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
    in the queue.  Also don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
            semaphore or mutex.  That means prvCopyDataToQueue() cannot result
            in a task disinheriting a priority and prvCopyDataToQueue() can be
            called here even though the disinherit function does not check if
            the scheduler is suspended before accessing the ready lists. */
            ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
                        {
                            /* The queue is a member of a queue set, and posting
                            to the queue set caused a higher priority task to
                            unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

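/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Reached through the queue.h macros xQueueSendFromISR(),
 * xQueueSendToBackFromISR(), xQueueSendToFrontFromISR() and
 * xQueueOverwriteFromISR().  ulReadByteFromPeripheral() is a hypothetical
 * device read and xQueue a handle created elsewhere:
 *
 *    void vAnInterruptHandler( void )
 *    {
 *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *    uint32_t ulReceived = ulReadByteFromPeripheral(); // hypothetical
 *
 *        ( void ) xQueueSendFromISR( xQueue, &ulReceived, &xHigherPriorityTaskWoken );
 *
 *        // Request a context switch on interrupt exit if a higher
 *        // priority task was unblocked by the post (port-specific macro).
 *        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *    }
 */
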
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
    item size is 0.  Don't directly wake a task that was blocked on a queue
    read, instead return a flag to say whether a context switch is required or
    not (i.e. has a task with a higher priority than us been woken by this
    post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
    if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
    there is a mutex holder, as priority inheritance makes no sense for an
    interrupt, only a task. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* When the queue is used to implement a semaphore no data is ever
        moved through the queue but it is still valid to see if the queue 'has
        space'. */
        if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
        {
            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
            holder - and if there is a mutex holder then the mutex cannot be
            given from an ISR.  As this is the ISR version of the function it
            can be assumed there is no mutex holder and no need to determine if
            priority disinheritance is needed.  Simply increase the count of
            messages (semaphores) available. */
            ++( pxQueue->uxMessagesWaiting );

            /* The event list is not altered if the queue is locked.  This will
            be done when the queue is unlocked later. */
            if( pxQueue->xTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
                        {
                            /* The semaphore is a member of a queue set, and
                            posting to the queue set caused a higher priority
                            task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                            context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was posted while it was locked. */
                ++( pxQueue->xTxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

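/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Reached through the semphr.h macro xSemaphoreGiveFromISR();
 * xBinarySemaphore is assumed to be a handle created elsewhere:
 *
 *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *    ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );
 *    portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); // port-specific macro
 */
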
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /* This function relaxes the coding standard somewhat to allow return
    statements within the function itself.  This is done in the interest
    of execution time efficiency. */

    for( ;; )
    {
        taskENTER_CRITICAL();
        {
            /* Is there data in the queue now?  To be running the calling task
            must be the highest priority task wanting to access the queue. */
            if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Remember the read position in case the queue is only being
                peeked. */
                pcOriginalReadPosition = pxQueue->u.pcReadFrom;

                prvCopyDataFromQueue( pxQueue, pvBuffer );

                if( xJustPeeking == pdFALSE )
                {
                    traceQUEUE_RECEIVE( pxQueue );

                    /* Actually removing data, not just peeking. */
                    --( pxQueue->uxMessagesWaiting );

                    #if ( configUSE_MUTEXES == 1 )
                    {
                        if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                        {
                            /* Record the information required to implement
                            priority inheritance should it become necessary. */
                            pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_MUTEXES */

                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
                        {
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    traceQUEUE_PEEK( pxQueue );

                    /* The data is not being removed, so reset the read
                    pointer. */
                    pxQueue->u.pcReadFrom = pcOriginalReadPosition;

                    /* The data is being left in the queue, so see if there are
                    any other tasks waiting for the data. */
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority than this task. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                    the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                    configure the timeout structure. */
                    vTaskSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
        now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

                #if ( configUSE_MUTEXES == 1 )
                {
                    if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                    {
                        taskENTER_CRITICAL();
                        {
                            vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
                        }
                        taskEXIT_CRITICAL();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif

                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
}
/*-----------------------------------------------------------*/

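/*
 * Example usage sketch (added for illustration, not part of the original
 * source).  Reached through the queue.h macros xQueueReceive() (which
 * removes the item) and xQueuePeek() (which leaves it on the queue):
 *
 *    uint32_t ulValue;
 *
 *    if( xQueueReceive( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) == pdPASS )
 *    {
 *        // ulValue now holds a copy of the item that was at the front
 *        // of the queue.
 *    }
 */
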
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

            prvCopyDataFromQueue( pxQueue, pvBuffer );
            --( pxQueue->uxMessagesWaiting );

            /* If the queue is locked the event list will not be modified.
            Instead update the lock count so the task that unlocks the queue
            will know that an ISR has removed data while the queue was
            locked. */
            if( pxQueue->xRxLock == queueUNLOCKED )
            {
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        if( pxHigherPriorityTaskWoken != NULL )
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                knows that data was removed while it was locked. */
                ++( pxQueue->xRxLock );
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );
    configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
    configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
    system call (or maximum API call) interrupt priority.  Interrupts that are
    above the maximum system call priority are kept permanently enabled, even
    when the RTOS kernel is in a critical section, but cannot make any calls to
    FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
    then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
    failure if a FreeRTOS API function is called from an interrupt that has been
    assigned a priority above the configured maximum system call priority.
    Only FreeRTOS functions that end in FromISR can be called from interrupts
    that have been assigned a priority at or (logically) below the maximum
    system call interrupt priority.  FreeRTOS maintains a separate interrupt
    safe API to ensure interrupt entry is as fast and as simple as possible.
    More information (albeit Cortex-M specific) is provided on the following
    link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
        {
            traceQUEUE_PEEK_FROM_ISR( pxQueue );

            /* Remember the read position so it can be reset as nothing is
            actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.pcReadFrom;
            prvCopyDataFromQueue( pxQueue, pvBuffer );
            pxQueue->u.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

    pxQueue = ( Queue_t * ) xQueue;
    configASSERT( pxQueue );

    taskENTER_CRITICAL();
    {
        uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
    }
    taskEXIT_CRITICAL();

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

    configASSERT( xQueue );

    uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

    return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

    configASSERT( pxQueue );

    traceQUEUE_DELETE( pxQueue );
    #if ( configQUEUE_REGISTRY_SIZE > 0 )
    {
        vQueueUnregisterQueue( pxQueue );
    }
    #endif
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
    {
        ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ( ( Queue_t * ) xQueue )->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
/*-----------------------------------------------------------*/

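/* NOTE: Illustrative sketch only, not part of the original source.  The
xPosition parameter of prvCopyDataToQueue() is what distinguishes the public
send variants.  Roughly, in terms of the public API:

	xQueueSendToBack( xQueue, &xItem, 0 );   // copies at pcWriteTo, moving forwards
	xQueueSendToFront( xQueue, &xItem, 0 );  // copies at u.pcReadFrom, moving backwards
	xQueueOverwrite( xQueue, &xItem );       // as front, but replaces the existing item

xQueueOverwrite() is intended for queues with a length of one, which is why
the overwrite path above decrements uxMessagesWaiting before the common
increment at the end of the function. */
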
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
/*-----------------------------------------------------------*/

static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that
						a context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/

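/* NOTE: Illustrative sketch only, not part of the original source.  The
prvLockQueue() macro and prvUnlockQueue() bracket code that must walk or block
on the queue's event lists without keeping interrupts disabled for the whole
operation.  The pattern used by the blocking API functions in this file is
roughly:

	vTaskSuspendAll();
	prvLockQueue( pxQueue );

	// ISRs may still add or remove items here; because the queue is
	// locked they increment xTxLock / xRxLock instead of touching the
	// event lists directly.

	prvUnlockQueue( pxQueue );    // Replays any wake-ups deferred by ISRs.
	( void ) xTaskResumeAll();
*/
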
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
{
BaseType_t xReturn;

	taskENTER_CRITICAL();
	{
		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
		{
			xReturn = pdTRUE;
		}
		else
		{
			xReturn = pdFALSE;
		}
	}
	taskEXIT_CRITICAL();

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

	configASSERT( xQueue );
	if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
	{
		xReturn = pdTRUE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

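/* NOTE: Illustrative sketch only, not part of the original source.  The
...FromISR() predicates above read a single UBaseType_t without entering a
critical section, so they are safe to call from an interrupt but only return
a snapshot.  A hypothetical UART receive ISR might use one to decide whether
to assert flow control (prvAssertRTS() is a hypothetical helper):

	if( xQueueIsQueueFullFromISR( xRxQueue ) != pdFALSE )
	{
		prvAssertRTS();  // Tell the peer to pause transmission.
	}
*/
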
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

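/* NOTE: Illustrative sketch only, not part of the original source.
Application code does not normally call xQueueCRSend() directly; it uses the
crQUEUE_SEND() macro from croutine.h, which turns the errQUEUE_BLOCKED and
errQUEUE_YIELD return values into co-routine scheduling actions.  A
hypothetical co-routine (xCoRoutineQueue is an assumed handle):

	void vExampleCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
	{
	static BaseType_t xResult;

		crSTART( xHandle );
		for( ;; )
		{
			crQUEUE_SEND( xHandle, xCoRoutineQueue, &uxIndex, 10, &xResult );
		}
		crEND();
	}
*/
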
#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

#if ( configUSE_CO_ROUTINES == 1 )

	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/

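/* NOTE: Illustrative sketch only, not part of the original source.  From an
ISR the crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() macros in
croutine.h wrap the two functions above.  A hypothetical transmit ISR that
drains a queue filled by a co-routine (xTxQueue is an assumed handle):

	void vTxISR( void )
	{
	static BaseType_t xCoRoutineWoken;
	char cByte;

		xCoRoutineWoken = pdFALSE;
		while( crQUEUE_RECEIVE_FROM_ISR( xTxQueue, &cByte, &xCoRoutineWoken ) == pdPASS )
		{
			// Write cByte to the peripheral...
		}
	}
*/
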
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

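/* NOTE: Illustrative sketch only, not part of the original source.  The
registry exists purely so that kernel-aware debuggers can display queues and
semaphores by name.  Typical application usage immediately after creation
(the handle, name, and Command_t type are hypothetical):

	xCommandQueue = xQueueCreate( 10, sizeof( Command_t ) );
	#if ( configQUEUE_REGISTRY_SIZE > 0 )
		vQueueAddToRegistry( xCommandQueue, "CmdQ" );
	#endif

The registry stores the name pointer, not a copy of the string, so the string
must remain valid for as long as the queue is registered. */
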
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	const char *pcQueueGetQueueName( QueueHandle_t xQueue )
	{
	UBaseType_t ux;
	const char *pcReturn = NULL;

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/

#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

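/* NOTE: Illustrative sketch only, not part of the original source.  Typical
queue set usage from a task: the set's length must be at least the sum of the
lengths of the queues and semaphores it contains.  The handles, lengths, and
xItem below are hypothetical.

	xSet = xQueueCreateSet( 10 + 1 );
	xQueueAddToSet( xDataQueue, xSet );       // A queue of length 10.
	xQueueAddToSet( xWakeSemaphore, xSet );   // A binary semaphore counts as 1.

	for( ;; )
	{
		QueueSetMemberHandle_t xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

		if( xActivated == xDataQueue )
		{
			xQueueReceive( xDataQueue, &xItem, 0 );  // Will not block: data is pending.
		}
		else if( xActivated == xWakeSemaphore )
		{
			xSemaphoreTake( xWakeSemaphore, 0 );
		}
	}
*/
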
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/

#if ( configUSE_QUEUE_SETS == 1 )

	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				( pxQueueSetContainer->xTxLock )++;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */