/*
    FreeRTOS V7.6.0 - Copyright (C) 2013 Real Time Engineers Ltd.

    VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

    ***************************************************************************
     *                                                                       *
     *    FreeRTOS provides completely free yet professionally developed,    *
     *    robust, strictly quality controlled, supported, and cross          *
     *    platform software that has become a de facto standard.             *
     *                                                                       *
     *    Help yourself get started quickly and support the FreeRTOS         *
     *    project by purchasing a FreeRTOS tutorial book, reference          *
     *    manual, or both from: http://www.FreeRTOS.org/Documentation        *
     *                                                                       *
    ***************************************************************************

    This file is part of the FreeRTOS distribution.

    FreeRTOS is free software; you can redistribute it and/or modify it under
    the terms of the GNU General Public License (version 2) as published by the
    Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

    >>! NOTE: The modification to the GPL is included to allow you to distribute
    >>! a combined work that includes FreeRTOS without being obliged to provide
    >>! the source code for proprietary components outside of the FreeRTOS
    >>! kernel.

    FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE.  Full license text is available from the following
    link: http://www.freertos.org/a00114.html

    ***************************************************************************
     *                                                                       *
     *    Having a problem?  Start by reading the FAQ "My application does   *
     *    not run, what could be wrong?"                                     *
     *                                                                       *
     *    http://www.FreeRTOS.org/FAQHelp.html                               *
     *                                                                       *
    ***************************************************************************

    http://www.FreeRTOS.org - Documentation, books, training, latest versions,
    license and Real Time Engineers Ltd. contact details.

    http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
    including FreeRTOS+Trace - an indispensable productivity tool, a DOS
    compatible FAT file system, and our tiny thread aware UDP/IP stack.

    http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
    Integrity Systems to sell under the OpenRTOS brand.  Low cost OpenRTOS
    licenses offer ticketed support, indemnification and middleware.

    http://www.SafeRTOS.com - High Integrity Systems also provide a safety
    engineered and independently SIL3 certified version for use in safety and
    mission critical applications that require provable dependability.
*/
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include <stdlib.h>
#include <string.h>

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( signed portBASE_TYPE ) -1 )
#define queueLOCKED_UNMODIFIED			( ( signed portBASE_TYPE ) 0 )

/* When the xQUEUE structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
xQUEUE structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( unsigned portBASE_TYPE ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( portTickType ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.
 */
typedef struct QueueDefinition
{
	signed char *pcHead;				/*< Points to the beginning of the queue storage area. */
	signed char *pcTail;				/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this byte is used as a marker. */

	signed char *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union								/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		signed char *pcReadFrom;		/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		unsigned portBASE_TYPE uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	xList xTasksWaitingToSend;			/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	xList xTasksWaitingToReceive;		/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile unsigned portBASE_TYPE uxMessagesWaiting;/*< The number of items currently in the queue. */
	unsigned portBASE_TYPE uxLength;	/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	unsigned portBASE_TYPE uxItemSize;	/*< The size of each item that the queue will hold. */

	volatile signed portBASE_TYPE xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile signed portBASE_TYPE xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		unsigned portBASE_TYPE uxQueueNumber;
		unsigned char ucQueueType;
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

} xQUEUE;
/*-----------------------------------------------------------*/
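/*
 Usage sketch (illustrative only, not part of the kernel source): a minimal
 application-side view of the structure defined above, exercised through the
 public API declared in queue.h.  The queue length and names are hypothetical.

	static xQueueHandle xExampleQueue = NULL;

	void vExampleTask( void *pvParameters )
	{
	unsigned long ulTx = 0, ulRx;

		xExampleQueue = xQueueCreate( 5, sizeof( unsigned long ) );

		for( ;; )
		{
			// Items are queued by copy, not reference, so ulTx can be reused.
			( void ) xQueueSend( xExampleQueue, &ulTx, portMAX_DELAY );
			ulTx++;

			if( xQueueReceive( xExampleQueue, &ulRx, ( portTickType ) 10 ) == pdPASS )
			{
				// ulRx now holds a copy of the oldest item on the queue.
			}
		}
	}
*/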
\r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		signed char *pcQueueName;
		xQueueHandle xHandle;
	} xQueueRegistryItem;

	/* The queue registry is simply an array of xQueueRegistryItem structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	xQueueRegistryItem xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
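/*
 Usage sketch (illustrative): when configQUEUE_REGISTRY_SIZE is greater than
 zero an application can add a queue to this registry so a kernel aware
 debugger can display it by name.  The handle and name are hypothetical.

	xQueueHandle xDebugQueue;

	xDebugQueue = xQueueCreate( 10, sizeof( unsigned long ) );
	vQueueAddToRegistry( xDebugQueue, ( signed char * ) "DebugQueue" );
*/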
\r
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( xQUEUE *pxQueue ) PRIVILEGED_FUNCTION;
\r
/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static signed portBASE_TYPE prvIsQueueEmpty( const xQUEUE *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static signed portBASE_TYPE prvIsQueueFull( const xQUEUE *pxQueue ) PRIVILEGED_FUNCTION;
\r
207 * Copies an item into the queue, either at the front of the queue or the
\r
208 * back of the queue.
\r
210 static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, portBASE_TYPE xPosition ) PRIVILEGED_FUNCTION;
\r
213 * Copies an item out of a queue.
\r
215 static void prvCopyDataFromQueue( xQUEUE * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
\r
217 #if ( configUSE_QUEUE_SETS == 1 )
\r
219 * Checks to see if a queue is a member of a queue set, and if so, notifies
\r
220 * the queue set that the queue contains data.
\r
222 static portBASE_TYPE prvNotifyQueueSetContainer( const xQUEUE * const pxQueue, portBASE_TYPE xCopyPosition ) PRIVILEGED_FUNCTION;
\r
225 /*-----------------------------------------------------------*/
\r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
\r
246 portBASE_TYPE xQueueGenericReset( xQueueHandle xQueue, portBASE_TYPE xNewQueue )
\r
248 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
250 configASSERT( pxQueue );
\r
252 taskENTER_CRITICAL();
\r
254 pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
\r
255 pxQueue->uxMessagesWaiting = ( unsigned portBASE_TYPE ) 0U;
\r
256 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
257 pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( unsigned portBASE_TYPE ) 1U ) * pxQueue->uxItemSize );
\r
258 pxQueue->xRxLock = queueUNLOCKED;
\r
259 pxQueue->xTxLock = queueUNLOCKED;
\r
261 if( xNewQueue == pdFALSE )
\r
263 /* If there are tasks blocked waiting to read from the queue, then
\r
264 the tasks will remain blocked as after this function exits the queue
\r
265 will still be empty. If there are tasks blocked waiting to write to
\r
266 the queue, then one should be unblocked as after this function exits
\r
267 it will be possible to write to it. */
\r
268 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
270 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
272 queueYIELD_IF_USING_PREEMPTION();
\r
276 mtCOVERAGE_TEST_MARKER();
\r
281 mtCOVERAGE_TEST_MARKER();
\r
286 /* Ensure the event queues start in the correct state. */
\r
287 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
\r
288 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
\r
291 taskEXIT_CRITICAL();
\r
	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
297 /*-----------------------------------------------------------*/
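/*
 Usage sketch (illustrative): application code normally reaches the function
 above through the xQueueReset() macro in queue.h, which passes pdFALSE as
 xNewQueue so that tasks blocked waiting to send are unblocked correctly.
 xExampleQueue is a hypothetical handle.

	if( xQueueReset( xExampleQueue ) == pdPASS )
	{
		// The queue is empty again; previously queued items are discarded.
	}
*/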
\r
299 xQueueHandle xQueueGenericCreate( unsigned portBASE_TYPE uxQueueLength, unsigned portBASE_TYPE uxItemSize, unsigned char ucQueueType )
\r
301 xQUEUE *pxNewQueue;
\r
302 size_t xQueueSizeInBytes;
\r
303 xQueueHandle xReturn = NULL;
\r
305 /* Remove compiler warnings about unused parameters should
\r
306 configUSE_TRACE_FACILITY not be set to 1. */
\r
307 ( void ) ucQueueType;
\r
309 /* Allocate the new queue structure. */
\r
310 if( uxQueueLength > ( unsigned portBASE_TYPE ) 0 )
\r
312 pxNewQueue = ( xQUEUE * ) pvPortMalloc( sizeof( xQUEUE ) );
\r
313 if( pxNewQueue != NULL )
\r
315 /* Create the list of pointers to queue items. The queue is one byte
\r
316 longer than asked for to make wrap checking easier/faster. */
\r
317 xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
319 pxNewQueue->pcHead = ( signed char * ) pvPortMalloc( xQueueSizeInBytes );
\r
320 if( pxNewQueue->pcHead != NULL )
\r
322 /* Initialise the queue members as described above where the
\r
323 queue type is defined. */
\r
324 pxNewQueue->uxLength = uxQueueLength;
\r
325 pxNewQueue->uxItemSize = uxItemSize;
\r
326 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
\r
328 #if ( configUSE_TRACE_FACILITY == 1 )
\r
330 pxNewQueue->ucQueueType = ucQueueType;
\r
332 #endif /* configUSE_TRACE_FACILITY */
\r
334 #if( configUSE_QUEUE_SETS == 1 )
\r
336 pxNewQueue->pxQueueSetContainer = NULL;
\r
338 #endif /* configUSE_QUEUE_SETS */
\r
340 traceQUEUE_CREATE( pxNewQueue );
\r
341 xReturn = pxNewQueue;
\r
345 traceQUEUE_CREATE_FAILED( ucQueueType );
\r
346 vPortFree( pxNewQueue );
\r
351 mtCOVERAGE_TEST_MARKER();
\r
356 mtCOVERAGE_TEST_MARKER();
\r
359 configASSERT( xReturn );
\r
363 /*-----------------------------------------------------------*/
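/*
 Usage sketch (illustrative): application code normally calls the
 xQueueCreate() macro from queue.h rather than xQueueGenericCreate()
 directly; the macro supplies queueQUEUE_TYPE_BASE as ucQueueType.  The
 handle name and dimensions are hypothetical.

	xQueueHandle xRxQueue;

	xRxQueue = xQueueCreate( 32, sizeof( char ) );
	if( xRxQueue == NULL )
	{
		// There was not enough FreeRTOS heap left to allocate the queue.
	}
*/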
\r
365 #if ( configUSE_MUTEXES == 1 )
\r
367 xQueueHandle xQueueCreateMutex( unsigned char ucQueueType )
\r
369 xQUEUE *pxNewQueue;
\r
371 /* Prevent compiler warnings about unused parameters if
\r
372 configUSE_TRACE_FACILITY does not equal 1. */
\r
373 ( void ) ucQueueType;
\r
375 /* Allocate the new queue structure. */
\r
376 pxNewQueue = ( xQUEUE * ) pvPortMalloc( sizeof( xQUEUE ) );
\r
377 if( pxNewQueue != NULL )
\r
379 /* Information required for priority inheritance. */
\r
380 pxNewQueue->pxMutexHolder = NULL;
\r
381 pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
\r
			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
\r
391 pxNewQueue->uxMessagesWaiting = ( unsigned portBASE_TYPE ) 0U;
\r
392 pxNewQueue->uxLength = ( unsigned portBASE_TYPE ) 1U;
\r
393 pxNewQueue->uxItemSize = ( unsigned portBASE_TYPE ) 0U;
\r
394 pxNewQueue->xRxLock = queueUNLOCKED;
\r
395 pxNewQueue->xTxLock = queueUNLOCKED;
\r
397 #if ( configUSE_TRACE_FACILITY == 1 )
\r
399 pxNewQueue->ucQueueType = ucQueueType;
\r
403 #if ( configUSE_QUEUE_SETS == 1 )
\r
405 pxNewQueue->pxQueueSetContainer = NULL;
\r
409 /* Ensure the event queues start with the correct state. */
\r
410 vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
\r
411 vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );
\r
413 traceCREATE_MUTEX( pxNewQueue );
\r
415 /* Start with the semaphore in the expected state. */
\r
416 ( void ) xQueueGenericSend( pxNewQueue, NULL, ( portTickType ) 0U, queueSEND_TO_BACK );
\r
420 traceCREATE_MUTEX_FAILED();
\r
423 configASSERT( pxNewQueue );
\r
427 #endif /* configUSE_MUTEXES */
\r
428 /*-----------------------------------------------------------*/
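/*
 Usage sketch (illustrative): mutexes are normally created and used through
 the semaphore API in semphr.h, which maps onto the queue functions in this
 file.  xGuard is a hypothetical handle guarding a shared resource.

	#include "semphr.h"

	xSemaphoreHandle xGuard;

	xGuard = xSemaphoreCreateMutex();

	if( xSemaphoreTake( xGuard, ( portTickType ) 10 ) == pdTRUE )
	{
		// Access the resource protected by the mutex here.
		( void ) xSemaphoreGive( xGuard );
	}
*/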
\r
430 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
\r
432 void* xQueueGetMutexHolder( xQueueHandle xSemaphore )
\r
436 /* This function is called by xSemaphoreGetMutexHolder(), and should not
\r
437 be called directly. Note: This is a good way of determining if the
\r
438 calling task is the mutex holder, but not a good way of determining the
\r
439 identity of the mutex holder, as the holder may change between the
\r
440 following critical section exiting and the function returning. */
\r
441 taskENTER_CRITICAL();
\r
443 if( ( ( xQUEUE * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
\r
445 pxReturn = ( void * ) ( ( xQUEUE * ) xSemaphore )->pxMutexHolder;
\r
452 taskEXIT_CRITICAL();
\r
458 /*-----------------------------------------------------------*/
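/*
 Usage sketch (illustrative): xQueueGetMutexHolder() is normally reached
 through the xSemaphoreGetMutexHolder() macro in semphr.h.  As noted above it
 is reliable for asking "does the calling task hold this mutex?", assuming
 INCLUDE_xSemaphoreGetMutexHolder and INCLUDE_xTaskGetCurrentTaskHandle are
 both set to 1.  xGuard is a hypothetical mutex handle.

	if( xSemaphoreGetMutexHolder( xGuard ) == xTaskGetCurrentTaskHandle() )
	{
		// The calling task currently holds xGuard.
	}
*/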
\r
460 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
\r
462 portBASE_TYPE xQueueGiveMutexRecursive( xQueueHandle xMutex )
\r
464 portBASE_TYPE xReturn;
\r
465 xQUEUE * const pxMutex = ( xQUEUE * ) xMutex;
\r
467 configASSERT( pxMutex );
\r
469 /* If this is the task that holds the mutex then pxMutexHolder will not
\r
470 change outside of this task. If this task does not hold the mutex then
\r
	pxMutexHolder can never coincidentally equal the task's handle, and as
\r
472 this is the only condition we are interested in it does not matter if
\r
473 pxMutexHolder is accessed simultaneously by another task. Therefore no
\r
474 mutual exclusion is required to test the pxMutexHolder variable. */
\r
475 if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as xTaskHandle is a typedef. */
\r
477 traceGIVE_MUTEX_RECURSIVE( pxMutex );
\r
479 /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
\r
480 the task handle, therefore no underflow check is required. Also,
\r
481 uxRecursiveCallCount is only modified by the mutex holder, and as
\r
482 there can only be one, no mutual exclusion is required to modify the
\r
483 uxRecursiveCallCount member. */
\r
484 ( pxMutex->u.uxRecursiveCallCount )--;
\r
486 /* Have we unwound the call count? */
\r
487 if( pxMutex->u.uxRecursiveCallCount == ( unsigned portBASE_TYPE ) 0 )
\r
489 /* Return the mutex. This will automatically unblock any other
\r
490 task that might be waiting to access the mutex. */
\r
491 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
\r
495 mtCOVERAGE_TEST_MARKER();
\r
502 /* We cannot give the mutex because we are not the holder. */
\r
505 traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
\r
511 #endif /* configUSE_RECURSIVE_MUTEXES */
\r
512 /*-----------------------------------------------------------*/
\r
514 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
\r
516 portBASE_TYPE xQueueTakeMutexRecursive( xQueueHandle xMutex, portTickType xBlockTime )
\r
518 portBASE_TYPE xReturn;
\r
519 xQUEUE * const pxMutex = ( xQUEUE * ) xMutex;
\r
521 configASSERT( pxMutex );
\r
523 /* Comments regarding mutual exclusion as per those within
\r
524 xQueueGiveMutexRecursive(). */
\r
526 traceTAKE_MUTEX_RECURSIVE( pxMutex );
\r
528 if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as xTaskHandle is a typedef. */
\r
530 ( pxMutex->u.uxRecursiveCallCount )++;
\r
535 xReturn = xQueueGenericReceive( pxMutex, NULL, xBlockTime, pdFALSE );
\r
		/* pdPASS will only be returned if the mutex was successfully
		obtained; the calling task may have blocked to reach here. */
\r
539 if( xReturn == pdPASS )
\r
541 ( pxMutex->u.uxRecursiveCallCount )++;
\r
545 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
\r
552 #endif /* configUSE_RECURSIVE_MUTEXES */
\r
553 /*-----------------------------------------------------------*/
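/*
 Usage sketch (illustrative): recursive mutexes are taken and given through
 the ...Recursive() macros in semphr.h, and every successful take must be
 balanced by a give before the mutex is released to other tasks.  xRecMutex
 is a hypothetical handle.

	xSemaphoreHandle xRecMutex;

	xRecMutex = xSemaphoreCreateRecursiveMutex();

	if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
	{
		// The same task can take the mutex again without blocking...
		( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );

		// ...provided each take is eventually matched by a give.
		( void ) xSemaphoreGiveRecursive( xRecMutex );
		( void ) xSemaphoreGiveRecursive( xRecMutex );
	}
*/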
\r
555 #if ( configUSE_COUNTING_SEMAPHORES == 1 )
\r
557 xQueueHandle xQueueCreateCountingSemaphore( unsigned portBASE_TYPE uxMaxCount, unsigned portBASE_TYPE uxInitialCount )
\r
559 xQueueHandle xHandle;
\r
561 configASSERT( uxMaxCount != 0 );
\r
562 configASSERT( uxInitialCount <= uxMaxCount );
\r
564 xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
\r
566 if( xHandle != NULL )
\r
568 ( ( xQUEUE * ) xHandle )->uxMessagesWaiting = uxInitialCount;
\r
570 traceCREATE_COUNTING_SEMAPHORE();
\r
574 traceCREATE_COUNTING_SEMAPHORE_FAILED();
\r
577 configASSERT( xHandle );
\r
581 #endif /* configUSE_COUNTING_SEMAPHORES */
\r
582 /*-----------------------------------------------------------*/
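/*
 Usage sketch (illustrative): counting semaphores are created through the
 xSemaphoreCreateCounting() macro in semphr.h.  A semaphore created with an
 initial count of zero is commonly used to count pending events; the names
 used here are hypothetical.

	xSemaphoreHandle xEventCount;

	xEventCount = xSemaphoreCreateCounting( 10, 0 );

	// Elsewhere an ISR gives the semaphore once per event (using the
	// ...FromISR variant); the handler task takes it once per event.
	if( xSemaphoreTake( xEventCount, portMAX_DELAY ) == pdTRUE )
	{
		// Process exactly one pending event here.
	}
*/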
\r
584 signed portBASE_TYPE xQueueGenericSend( xQueueHandle xQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition )
\r
586 signed portBASE_TYPE xEntryTimeSet = pdFALSE;
\r
587 xTimeOutType xTimeOut;
\r
588 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
590 configASSERT( pxQueue );
\r
591 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
592 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
593 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
595 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
600 /* This function relaxes the coding standard somewhat to allow return
\r
601 statements within the function itself. This is done in the interest
\r
602 of execution time efficiency. */
\r
605 taskENTER_CRITICAL();
\r
607 /* Is there room on the queue now? The running task must be
\r
608 the highest priority task wanting to access the queue. If
\r
609 the head item in the queue is to be overwritten then it does
\r
610 not matter if the queue is full. */
\r
611 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
613 traceQUEUE_SEND( pxQueue );
\r
614 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
616 #if ( configUSE_QUEUE_SETS == 1 )
\r
618 if( pxQueue->pxQueueSetContainer != NULL )
\r
620 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
622 /* The queue is a member of a queue set, and posting
\r
623 to the queue set caused a higher priority task to
\r
624 unblock. A context switch is required. */
\r
625 queueYIELD_IF_USING_PREEMPTION();
\r
629 mtCOVERAGE_TEST_MARKER();
\r
634 /* If there was a task waiting for data to arrive on the
\r
635 queue then unblock it now. */
\r
636 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
638 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
640 /* The unblocked task has a priority higher than
\r
641 our own so yield immediately. Yes it is ok to
\r
642 do this from within the critical section - the
\r
643 kernel takes care of that. */
\r
644 queueYIELD_IF_USING_PREEMPTION();
\r
648 mtCOVERAGE_TEST_MARKER();
\r
653 mtCOVERAGE_TEST_MARKER();
\r
657 #else /* configUSE_QUEUE_SETS */
\r
659 /* If there was a task waiting for data to arrive on the
\r
660 queue then unblock it now. */
\r
661 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
663 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
665 /* The unblocked task has a priority higher than
\r
666 our own so yield immediately. Yes it is ok to do
\r
667 this from within the critical section - the kernel
\r
668 takes care of that. */
\r
669 queueYIELD_IF_USING_PREEMPTION();
\r
673 mtCOVERAGE_TEST_MARKER();
\r
678 mtCOVERAGE_TEST_MARKER();
\r
681 #endif /* configUSE_QUEUE_SETS */
\r
683 taskEXIT_CRITICAL();
\r
			/* Return to the original privilege level before exiting the
			function. */
			return pdPASS;
691 if( xTicksToWait == ( portTickType ) 0 )
\r
693 /* The queue was full and no block time is specified (or
\r
694 the block time has expired) so leave now. */
\r
695 taskEXIT_CRITICAL();
\r
				/* Return to the original privilege level before exiting
				the function. */
699 traceQUEUE_SEND_FAILED( pxQueue );
\r
700 return errQUEUE_FULL;
\r
702 else if( xEntryTimeSet == pdFALSE )
\r
704 /* The queue was full and a block time was specified so
\r
705 configure the timeout structure. */
\r
706 vTaskSetTimeOutState( &xTimeOut );
\r
707 xEntryTimeSet = pdTRUE;
\r
711 /* Entry time was already set. */
\r
712 mtCOVERAGE_TEST_MARKER();
\r
716 taskEXIT_CRITICAL();
\r
		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );
\r
724 /* Update the timeout state to see if it has expired yet. */
\r
725 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
727 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
729 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
730 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
\r
737 prvUnlockQueue( pxQueue );
\r
739 /* Resuming the scheduler will move tasks from the pending
\r
740 ready list into the ready list - so it is feasible that this
\r
741 task is already in a ready list before it yields - in which
\r
742 case the yield will not cause a context switch unless there
\r
743 is also a higher priority task in the pending ready list. */
\r
744 if( xTaskResumeAll() == pdFALSE )
\r
746 portYIELD_WITHIN_API();
\r
752 prvUnlockQueue( pxQueue );
\r
753 ( void ) xTaskResumeAll();
\r
758 /* The timeout has expired. */
\r
759 prvUnlockQueue( pxQueue );
\r
760 ( void ) xTaskResumeAll();
\r
			/* Return to the original privilege level before exiting the
			function. */
764 traceQUEUE_SEND_FAILED( pxQueue );
\r
765 return errQUEUE_FULL;
\r
769 /*-----------------------------------------------------------*/
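/*
 Usage sketch (illustrative): application code reaches xQueueGenericSend()
 through the xQueueSend(), xQueueSendToBack(), xQueueSendToFront() and
 xQueueOverwrite() macros in queue.h, which simply fix the xCopyPosition
 (and, for xQueueOverwrite(), a zero block time) arguments.  The handles
 xExampleQueue and xMailbox are hypothetical.

	unsigned long ulReading = 123UL;

	// Block for up to 10 ticks for space to become available.
	if( xQueueSendToBack( xExampleQueue, &ulReading, ( portTickType ) 10 ) != pdPASS )
	{
		// The queue remained full for the whole block time.
	}

	// Intended for queues with a length of one: the previous value is
	// replaced rather than queued behind it, and the call never blocks.
	( void ) xQueueOverwrite( xMailbox, &ulReading );
*/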
\r
771 #if ( configUSE_ALTERNATIVE_API == 1 )
\r
773 signed portBASE_TYPE xQueueAltGenericSend( xQueueHandle xQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition )
\r
775 signed portBASE_TYPE xEntryTimeSet = pdFALSE;
\r
776 xTimeOutType xTimeOut;
\r
777 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
779 configASSERT( pxQueue );
\r
780 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
784 taskENTER_CRITICAL();
\r
786 /* Is there room on the queue now? To be running we must be
\r
787 the highest priority task wanting to access the queue. */
\r
788 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
\r
790 traceQUEUE_SEND( pxQueue );
\r
791 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
793 /* If there was a task waiting for data to arrive on the
\r
794 queue then unblock it now. */
\r
795 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
797 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
\r
799 /* The unblocked task has a priority higher than
\r
800 our own so yield immediately. */
\r
801 portYIELD_WITHIN_API();
\r
805 mtCOVERAGE_TEST_MARKER();
\r
810 mtCOVERAGE_TEST_MARKER();
\r
813 taskEXIT_CRITICAL();
\r
818 if( xTicksToWait == ( portTickType ) 0 )
\r
820 taskEXIT_CRITICAL();
\r
821 return errQUEUE_FULL;
\r
823 else if( xEntryTimeSet == pdFALSE )
\r
825 vTaskSetTimeOutState( &xTimeOut );
\r
826 xEntryTimeSet = pdTRUE;
\r
830 taskEXIT_CRITICAL();
\r
832 taskENTER_CRITICAL();
\r
834 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
836 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
838 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
\r
839 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
\r
840 portYIELD_WITHIN_API();
\r
844 mtCOVERAGE_TEST_MARKER();
\r
849 taskEXIT_CRITICAL();
\r
850 traceQUEUE_SEND_FAILED( pxQueue );
\r
851 return errQUEUE_FULL;
\r
854 taskEXIT_CRITICAL();
\r
858 #endif /* configUSE_ALTERNATIVE_API */
\r
859 /*-----------------------------------------------------------*/
\r
861 #if ( configUSE_ALTERNATIVE_API == 1 )
\r
863 signed portBASE_TYPE xQueueAltGenericReceive( xQueueHandle xQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking )
\r
865 signed portBASE_TYPE xEntryTimeSet = pdFALSE;
\r
866 xTimeOutType xTimeOut;
\r
867 signed char *pcOriginalReadPosition;
\r
868 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
870 configASSERT( pxQueue );
\r
871 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
875 taskENTER_CRITICAL();
\r
877 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
\r
879 /* Remember our read position in case we are just peeking. */
\r
880 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
882 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
884 if( xJustPeeking == pdFALSE )
\r
886 traceQUEUE_RECEIVE( pxQueue );
\r
888 /* Data is actually being removed (not just peeked). */
\r
889 --( pxQueue->uxMessagesWaiting );
\r
891 #if ( configUSE_MUTEXES == 1 )
\r
893 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
895 /* Record the information required to implement
\r
896 priority inheritance should it become necessary. */
\r
897 pxQueue->pxMutexHolder = ( signed char * ) xTaskGetCurrentTaskHandle();
\r
901 mtCOVERAGE_TEST_MARKER();
\r
906 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
908 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
910 portYIELD_WITHIN_API();
\r
914 mtCOVERAGE_TEST_MARKER();
\r
920 traceQUEUE_PEEK( pxQueue );
\r
				/* We are not removing the data, so reset our read
				pointer. */
				pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
926 /* The data is being left in the queue, so see if there are
\r
927 any other tasks waiting for the data. */
\r
928 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
930 /* Tasks that are removed from the event list will get added to
\r
931 the pending ready list as the scheduler is still suspended. */
\r
932 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
934 /* The task waiting has a higher priority than this task. */
\r
935 portYIELD_WITHIN_API();
\r
939 mtCOVERAGE_TEST_MARKER();
\r
944 mtCOVERAGE_TEST_MARKER();
\r
948 taskEXIT_CRITICAL();
\r
953 if( xTicksToWait == ( portTickType ) 0 )
\r
955 taskEXIT_CRITICAL();
\r
956 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
957 return errQUEUE_EMPTY;
\r
959 else if( xEntryTimeSet == pdFALSE )
\r
961 vTaskSetTimeOutState( &xTimeOut );
\r
962 xEntryTimeSet = pdTRUE;
\r
966 taskEXIT_CRITICAL();
\r
968 taskENTER_CRITICAL();
\r
970 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
972 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
974 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
976 #if ( configUSE_MUTEXES == 1 )
\r
978 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
980 taskENTER_CRITICAL();
\r
982 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
984 taskEXIT_CRITICAL();
\r
988 mtCOVERAGE_TEST_MARKER();
\r
993 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
994 portYIELD_WITHIN_API();
\r
998 mtCOVERAGE_TEST_MARKER();
\r
1003 taskEXIT_CRITICAL();
\r
1004 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1005 return errQUEUE_EMPTY;
\r
1008 taskEXIT_CRITICAL();
\r
1013 #endif /* configUSE_ALTERNATIVE_API */
\r
1014 /*-----------------------------------------------------------*/
\r
1016 signed portBASE_TYPE xQueueGenericSendFromISR( xQueueHandle xQueue, const void * const pvItemToQueue, signed portBASE_TYPE *pxHigherPriorityTaskWoken, portBASE_TYPE xCopyPosition )
\r
1018 signed portBASE_TYPE xReturn;
\r
1019 unsigned portBASE_TYPE uxSavedInterruptStatus;
\r
1020 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1022 configASSERT( pxQueue );
\r
1023 configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
1024 configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
\r
1026 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1027 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1028 above the maximum system call priority are kept permanently enabled, even
\r
1029 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1030 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1031 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1032 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1033 assigned a priority above the configured maximum system call priority.
\r
1034 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1035 that have been assigned a priority at or (logically) below the maximum
\r
1036 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1037 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1038 More information (albeit Cortex-M specific) is provided on the following
\r
1039 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1040 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1042 /* Similar to xQueueGenericSend, except we don't block if there is no room
\r
1043 in the queue. Also we don't directly wake a task that was blocked on a
\r
1044 queue read, instead we return a flag to say whether a context switch is
\r
	required or not (i.e. has a task with a higher priority than us been woken
	by this post). */
1047 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1049 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
\r
1051 traceQUEUE_SEND_FROM_ISR( pxQueue );
\r
1053 prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
\r
1055 /* If the queue is locked we do not alter the event list. This will
\r
1056 be done when the queue is unlocked later. */
\r
1057 if( pxQueue->xTxLock == queueUNLOCKED )
\r
1059 #if ( configUSE_QUEUE_SETS == 1 )
\r
1061 if( pxQueue->pxQueueSetContainer != NULL )
\r
1063 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
\r
1065 /* The queue is a member of a queue set, and posting
\r
1066 to the queue set caused a higher priority task to
\r
1067 unblock. A context switch is required. */
\r
1068 if( pxHigherPriorityTaskWoken != NULL )
\r
1070 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1074 mtCOVERAGE_TEST_MARKER();
\r
1079 mtCOVERAGE_TEST_MARKER();
\r
1084 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1086 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1088 /* The task waiting has a higher priority so record that a
\r
1089 context switch is required. */
\r
1090 if( pxHigherPriorityTaskWoken != NULL )
\r
1092 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1096 mtCOVERAGE_TEST_MARKER();
\r
1101 mtCOVERAGE_TEST_MARKER();
\r
1106 mtCOVERAGE_TEST_MARKER();
\r
1110 #else /* configUSE_QUEUE_SETS */
\r
1112 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1114 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1116 /* The task waiting has a higher priority so record that a
\r
1117 context switch is required. */
\r
1118 if( pxHigherPriorityTaskWoken != NULL )
\r
1120 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1124 mtCOVERAGE_TEST_MARKER();
\r
1129 mtCOVERAGE_TEST_MARKER();
\r
1134 mtCOVERAGE_TEST_MARKER();
\r
1137 #endif /* configUSE_QUEUE_SETS */
\r
1141 /* Increment the lock count so the task that unlocks the queue
\r
1142 knows that data was posted while it was locked. */
\r
1143 ++( pxQueue->xTxLock );
\r
1150 traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
\r
1151 xReturn = errQUEUE_FULL;
\r
1154 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1158 /*-----------------------------------------------------------*/
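/*
 Usage sketch (illustrative): posting to a queue from an interrupt handler
 through the xQueueSendFromISR() macro.  The handler and register read are
 hypothetical, and the name of the end-of-interrupt yield macro
 (portYIELD_FROM_ISR() here) is port specific.

	void vRxISR( void )
	{
	portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE;
	char cRxedChar;

		cRxedChar = ucReadRxRegister();	// hypothetical peripheral access

		( void ) xQueueSendFromISR( xExampleQueue, &cRxedChar, &xHigherPriorityTaskWoken );

		// Request a context switch before exiting if a task with a priority
		// above the interrupted task was unblocked by the post.
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/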
\r
1160 signed portBASE_TYPE xQueueGenericReceive( xQueueHandle xQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking )
\r
1162 signed portBASE_TYPE xEntryTimeSet = pdFALSE;
\r
1163 xTimeOutType xTimeOut;
\r
1164 signed char *pcOriginalReadPosition;
\r
1165 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1167 configASSERT( pxQueue );
\r
1168 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
1169 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
\r
1171 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
\r
1175 /* This function relaxes the coding standard somewhat to allow return
\r
1176 statements within the function itself. This is done in the interest
\r
1177 of execution time efficiency. */
\r
1181 taskENTER_CRITICAL();
\r
1183 /* Is there data in the queue now? To be running we must be
\r
1184 the highest priority task wanting to access the queue. */
\r
1185 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
\r
			/* Remember the read position in case the queue is only being
			peeked. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1191 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1193 if( xJustPeeking == pdFALSE )
\r
1195 traceQUEUE_RECEIVE( pxQueue );
\r
1197 /* Actually removing data, not just peeking. */
\r
1198 --( pxQueue->uxMessagesWaiting );
\r
1200 #if ( configUSE_MUTEXES == 1 )
\r
1202 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1204 /* Record the information required to implement
\r
1205 priority inheritance should it become necessary. */
\r
1206 pxQueue->pxMutexHolder = ( signed char * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as xTaskHandle is a typedef. */
\r
1210 mtCOVERAGE_TEST_MARKER();
\r
1215 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1217 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
\r
1219 queueYIELD_IF_USING_PREEMPTION();
\r
1223 mtCOVERAGE_TEST_MARKER();
\r
1228 mtCOVERAGE_TEST_MARKER();
\r
1233 traceQUEUE_PEEK( pxQueue );
\r
				/* The data is not being removed, so reset the read
				pointer. */
				pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1239 /* The data is being left in the queue, so see if there are
\r
1240 any other tasks waiting for the data. */
\r
1241 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1243 /* Tasks that are removed from the event list will get added to
\r
1244 the pending ready list as the scheduler is still suspended. */
\r
1245 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1247 /* The task waiting has a higher priority than this task. */
\r
1248 queueYIELD_IF_USING_PREEMPTION();
\r
1252 mtCOVERAGE_TEST_MARKER();
\r
1257 mtCOVERAGE_TEST_MARKER();
\r
1261 taskEXIT_CRITICAL();
\r
1266 if( xTicksToWait == ( portTickType ) 0 )
\r
1268 /* The queue was empty and no block time is specified (or
\r
1269 the block time has expired) so leave now. */
\r
1270 taskEXIT_CRITICAL();
\r
1271 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1272 return errQUEUE_EMPTY;
\r
1274 else if( xEntryTimeSet == pdFALSE )
\r
1276 /* The queue was empty and a block time was specified so
\r
1277 configure the timeout structure. */
\r
1278 vTaskSetTimeOutState( &xTimeOut );
\r
1279 xEntryTimeSet = pdTRUE;
\r
1283 /* Entry time was already set. */
\r
1284 mtCOVERAGE_TEST_MARKER();
\r
1288 taskEXIT_CRITICAL();
\r
1290 /* Interrupts and other tasks can send to and receive from the queue
\r
1291 now the critical section has been exited. */
\r
1293 vTaskSuspendAll();
\r
1294 prvLockQueue( pxQueue );
\r
1296 /* Update the timeout state to see if it has expired yet. */
\r
1297 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
\r
1299 if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
\r
1301 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
\r
1303 #if ( configUSE_MUTEXES == 1 )
\r
1305 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1307 taskENTER_CRITICAL();
\r
1309 vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
\r
1311 taskEXIT_CRITICAL();
\r
1315 mtCOVERAGE_TEST_MARKER();
\r
1320 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
\r
1321 prvUnlockQueue( pxQueue );
\r
1322 if( xTaskResumeAll() == pdFALSE )
\r
1324 portYIELD_WITHIN_API();
\r
1328 mtCOVERAGE_TEST_MARKER();
\r
1334 prvUnlockQueue( pxQueue );
\r
1335 ( void ) xTaskResumeAll();
\r
1340 prvUnlockQueue( pxQueue );
\r
1341 ( void ) xTaskResumeAll();
\r
1342 traceQUEUE_RECEIVE_FAILED( pxQueue );
\r
1343 return errQUEUE_EMPTY;
\r
1347 /*-----------------------------------------------------------*/
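/*
 Usage sketch (illustrative): xQueueGenericReceive() is reached through the
 xQueueReceive() and xQueuePeek() macros in queue.h, which differ only in the
 xJustPeeking argument they fix, so a peek leaves the item on the queue.
 xExampleQueue is a hypothetical handle.

	unsigned long ulItem;

	if( xQueuePeek( xExampleQueue, &ulItem, 0 ) == pdPASS )
	{
		// The item was copied out but is still at the front of the queue.
	}

	if( xQueueReceive( xExampleQueue, &ulItem, ( portTickType ) 100 ) == pdPASS )
	{
		// The item has now been removed from the queue.
	}
*/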
\r
1349 signed portBASE_TYPE xQueueReceiveFromISR( xQueueHandle xQueue, void * const pvBuffer, signed portBASE_TYPE *pxHigherPriorityTaskWoken )
\r
1351 signed portBASE_TYPE xReturn;
\r
1352 unsigned portBASE_TYPE uxSavedInterruptStatus;
\r
1353 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1355 configASSERT( pxQueue );
\r
1356 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
1358 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1359 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1360 above the maximum system call priority are kept permanently enabled, even
\r
1361 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1362 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1363 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1364 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1365 assigned a priority above the configured maximum system call priority.
\r
1366 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1367 that have been assigned a priority at or (logically) below the maximum
\r
1368 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1369 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1370 More information (albeit Cortex-M specific) is provided on the following
\r
1371 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1372 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1374 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1376 /* Cannot block in an ISR, so check there is data available. */
\r
1377 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
\r
1379 traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
\r
1381 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1382 --( pxQueue->uxMessagesWaiting );
\r
		/* If the queue is locked the event list will not be modified.
		Instead update the lock count so the task that unlocks the queue
		will know that an ISR has removed data while the queue was
		locked. */
		if( pxQueue->xRxLock == queueUNLOCKED )
\r
1390 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1392 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1394 /* The task waiting has a higher priority than us so
\r
1395 force a context switch. */
\r
1396 if( pxHigherPriorityTaskWoken != NULL )
\r
1398 *pxHigherPriorityTaskWoken = pdTRUE;
\r
1402 mtCOVERAGE_TEST_MARKER();
\r
1407 mtCOVERAGE_TEST_MARKER();
\r
1412 mtCOVERAGE_TEST_MARKER();
\r
1417 /* Increment the lock count so the task that unlocks the queue
\r
1418 knows that data was removed while it was locked. */
\r
1419 ++( pxQueue->xRxLock );
\r
1427 traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
\r
1430 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1434 /*-----------------------------------------------------------*/
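/*
 Usage sketch (illustrative): draining a queue from an interrupt handler, for
 example feeding a transmit register.  As with the send variant the woken
 flag is passed to the port's end-of-interrupt yield macro; the peripheral
 access shown is hypothetical.

	void vTxISR( void )
	{
	portBASE_TYPE xTaskWoken = pdFALSE;
	char cNextChar;

		if( xQueueReceiveFromISR( xExampleQueue, &cNextChar, &xTaskWoken ) == pdTRUE )
		{
			vWriteTxRegister( cNextChar );	// hypothetical peripheral access
		}

		portYIELD_FROM_ISR( xTaskWoken );
	}
*/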
\r
1436 signed portBASE_TYPE xQueuePeekFromISR( xQueueHandle xQueue, void * const pvBuffer )
\r
1438 signed portBASE_TYPE xReturn;
\r
1439 unsigned portBASE_TYPE uxSavedInterruptStatus;
\r
1440 signed char *pcOriginalReadPosition;
\r
1441 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1443 configASSERT( pxQueue );
\r
1444 configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( unsigned portBASE_TYPE ) 0U ) ) );
\r
1446 /* RTOS ports that support interrupt nesting have the concept of a maximum
\r
1447 system call (or maximum API call) interrupt priority. Interrupts that are
\r
1448 above the maximum system call priority are kept permanently enabled, even
\r
1449 when the RTOS kernel is in a critical section, but cannot make any calls to
\r
1450 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
\r
1451 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
\r
1452 failure if a FreeRTOS API function is called from an interrupt that has been
\r
1453 assigned a priority above the configured maximum system call priority.
\r
1454 Only FreeRTOS functions that end in FromISR can be called from interrupts
\r
1455 that have been assigned a priority at or (logically) below the maximum
\r
1456 system call interrupt priority. FreeRTOS maintains a separate interrupt
\r
1457 safe API to ensure interrupt entry is as fast and as simple as possible.
\r
1458 More information (albeit Cortex-M specific) is provided on the following
\r
1459 link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
\r
1460 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
\r
1462 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
\r
1464 /* Cannot block in an ISR, so check there is data available. */
\r
1465 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
\r
1467 traceQUEUE_PEEK_FROM_ISR( pxQueue );
\r
1469 /* Remember the read position so it can be reset as nothing is
\r
1470 actually being removed from the queue. */
\r
1471 pcOriginalReadPosition = pxQueue->u.pcReadFrom;
\r
1472 prvCopyDataFromQueue( pxQueue, pvBuffer );
\r
1473 pxQueue->u.pcReadFrom = pcOriginalReadPosition;
\r
1480 traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
\r
1483 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
\r
1487 /*-----------------------------------------------------------*/
\r
1489 unsigned portBASE_TYPE uxQueueMessagesWaiting( const xQueueHandle xQueue )
\r
1491 unsigned portBASE_TYPE uxReturn;
\r
1493 configASSERT( xQueue );
\r
1495 taskENTER_CRITICAL();
\r
1497 uxReturn = ( ( xQUEUE * ) xQueue )->uxMessagesWaiting;
\r
1499 taskEXIT_CRITICAL();
\r
1502 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1503 /*-----------------------------------------------------------*/
\r
unsigned portBASE_TYPE uxQueueSpacesAvailable( const xQueueHandle xQueue )
{
unsigned portBASE_TYPE uxReturn;
xQUEUE *pxQueue;

	pxQueue = ( xQUEUE * ) xQueue;
	configASSERT( pxQueue );
\r
1513 taskENTER_CRITICAL();
\r
1515 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
\r
1517 taskEXIT_CRITICAL();
\r
1520 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1521 /*-----------------------------------------------------------*/
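/*
 Usage sketch (illustrative): the two query functions above are often used to
 decide whether a send or receive can be attempted without blocking.
 xExampleQueue is a hypothetical handle.

	if( uxQueueSpacesAvailable( xExampleQueue ) > ( unsigned portBASE_TYPE ) 0 )
	{
		// At least one slot is currently free, so an immediate send
		// (block time of zero) should succeed.
	}

	if( uxQueueMessagesWaiting( xExampleQueue ) > ( unsigned portBASE_TYPE ) 0 )
	{
		// At least one item is currently queued, so an immediate receive
		// should succeed.
	}
*/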
\r
1523 unsigned portBASE_TYPE uxQueueMessagesWaitingFromISR( const xQueueHandle xQueue )
\r
1525 unsigned portBASE_TYPE uxReturn;
\r
1527 configASSERT( xQueue );
\r
1529 uxReturn = ( ( xQUEUE * ) xQueue )->uxMessagesWaiting;
\r
1532 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
\r
1533 /*-----------------------------------------------------------*/
\r
1535 void vQueueDelete( xQueueHandle xQueue )
\r
1537 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1539 configASSERT( pxQueue );
\r
1541 traceQUEUE_DELETE( pxQueue );
\r
1542 #if ( configQUEUE_REGISTRY_SIZE > 0 )
\r
1544 vQueueUnregisterQueue( pxQueue );
\r
1547 vPortFree( pxQueue->pcHead );
\r
1548 vPortFree( pxQueue );
\r
1550 /*-----------------------------------------------------------*/
\r
1552 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1554 unsigned portBASE_TYPE uxQueueGetQueueNumber( xQueueHandle xQueue )
\r
1556 return ( ( xQUEUE * ) xQueue )->uxQueueNumber;
\r
1559 #endif /* configUSE_TRACE_FACILITY */
\r
1560 /*-----------------------------------------------------------*/
\r
1562 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1564 void vQueueSetQueueNumber( xQueueHandle xQueue, unsigned portBASE_TYPE uxQueueNumber )
\r
1566 ( ( xQUEUE * ) xQueue )->uxQueueNumber = uxQueueNumber;
\r
1569 #endif /* configUSE_TRACE_FACILITY */
\r
1570 /*-----------------------------------------------------------*/
\r
1572 #if ( configUSE_TRACE_FACILITY == 1 )
\r
1574 unsigned char ucQueueGetQueueType( xQueueHandle xQueue )
\r
1576 return ( ( xQUEUE * ) xQueue )->ucQueueType;
\r
1579 #endif /* configUSE_TRACE_FACILITY */
\r
1580 /*-----------------------------------------------------------*/
\r
1582 static void prvCopyDataToQueue( xQUEUE *pxQueue, const void *pvItemToQueue, portBASE_TYPE xPosition )
\r
1584 if( pxQueue->uxItemSize == ( unsigned portBASE_TYPE ) 0 )
\r
1586 #if ( configUSE_MUTEXES == 1 )
\r
1588 if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
\r
1590 /* The mutex is no longer being held. */
\r
1591 vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
\r
1592 pxQueue->pxMutexHolder = NULL;
\r
1596 mtCOVERAGE_TEST_MARKER();
\r
1599 #endif /* configUSE_MUTEXES */
\r
1601 else if( xPosition == queueSEND_TO_BACK )
\r
1603 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
\r
1604 pxQueue->pcWriteTo += pxQueue->uxItemSize;
\r
1605 if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1607 pxQueue->pcWriteTo = pxQueue->pcHead;
\r
1611 mtCOVERAGE_TEST_MARKER();
\r
1616 ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
\r
1617 pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
\r
1618 if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
\r
1620 pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
\r
1624 mtCOVERAGE_TEST_MARKER();
\r
1627 if( xPosition == queueOVERWRITE )
\r
1629 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
\r
			/* An item is not being added but overwritten, so subtract
			one from the recorded number of items in the queue so when
			one is added again below the number of recorded items remains
			correct. */
			--( pxQueue->uxMessagesWaiting );
\r
1639 mtCOVERAGE_TEST_MARKER();
\r
1644 mtCOVERAGE_TEST_MARKER();
\r
1648 ++( pxQueue->uxMessagesWaiting );
\r
1650 /*-----------------------------------------------------------*/
\r
1652 static void prvCopyDataFromQueue( xQUEUE * const pxQueue, void * const pvBuffer )
\r
1654 if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
\r
1656 pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
\r
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
\r
1659 pxQueue->u.pcReadFrom = pxQueue->pcHead;
\r
1663 mtCOVERAGE_TEST_MARKER();
\r
1665 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
\r
1669 mtCOVERAGE_TEST_MARKER();
\r
1672 /*-----------------------------------------------------------*/
\r
1674 static void prvUnlockQueue( xQUEUE *pxQueue )
\r
1676 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
\r
	/* The lock counts contain the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
\r
1684 /* See if data was added to the queue while it was locked. */
\r
1685 while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
\r
1687 /* Data was posted while the queue was locked. Are any tasks
\r
1688 blocked waiting for data to become available? */
\r
1689 #if ( configUSE_QUEUE_SETS == 1 )
\r
1691 if( pxQueue->pxQueueSetContainer != NULL )
\r
1693 if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
\r
1695 /* The queue is a member of a queue set, and posting to
\r
1696 the queue set caused a higher priority task to unblock.
\r
1697 A context switch is required. */
\r
1698 vTaskMissedYield();
\r
1702 mtCOVERAGE_TEST_MARKER();
\r
1707 /* Tasks that are removed from the event list will get added to
\r
1708 the pending ready list as the scheduler is still suspended. */
\r
1709 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1711 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1713 /* The task waiting has a higher priority so record that a
\r
1714 context switch is required. */
\r
1715 vTaskMissedYield();
\r
1719 mtCOVERAGE_TEST_MARKER();
\r
1728 #else /* configUSE_QUEUE_SETS */
\r
1730 /* Tasks that are removed from the event list will get added to
\r
1731 the pending ready list as the scheduler is still suspended. */
\r
1732 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1734 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1736 /* The task waiting has a higher priority so record that a
\r
1737 context switch is required. */
\r
1738 vTaskMissedYield();
\r
1742 mtCOVERAGE_TEST_MARKER();
\r
1750 #endif /* configUSE_QUEUE_SETS */
\r
1752 --( pxQueue->xTxLock );
\r
1755 pxQueue->xTxLock = queueUNLOCKED;
\r
1757 taskEXIT_CRITICAL();
\r
1759 /* Do the same for the Rx lock. */
\r
1760 taskENTER_CRITICAL();
\r
1762 while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
\r
1764 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
\r
1766 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
\r
1768 vTaskMissedYield();
\r
1772 mtCOVERAGE_TEST_MARKER();
\r
1775 --( pxQueue->xRxLock );
\r
1783 pxQueue->xRxLock = queueUNLOCKED;
\r
1785 taskEXIT_CRITICAL();
\r
1787 /*-----------------------------------------------------------*/
\r
1789 static signed portBASE_TYPE prvIsQueueEmpty( const xQUEUE *pxQueue )
\r
1791 signed portBASE_TYPE xReturn;
\r
1793 taskENTER_CRITICAL();
\r
1795 if( pxQueue->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0 )
\r
1801 xReturn = pdFALSE;
\r
1804 taskEXIT_CRITICAL();
\r
1808 /*-----------------------------------------------------------*/
\r
1810 signed portBASE_TYPE xQueueIsQueueEmptyFromISR( const xQueueHandle xQueue )
\r
1812 signed portBASE_TYPE xReturn;
\r
1814 configASSERT( xQueue );
\r
1815 if( ( ( xQUEUE * ) xQueue )->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0 )
\r
1821 xReturn = pdFALSE;
\r
1825 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1826 /*-----------------------------------------------------------*/
\r
1828 static signed portBASE_TYPE prvIsQueueFull( const xQUEUE *pxQueue )
\r
1830 signed portBASE_TYPE xReturn;
\r
1832 taskENTER_CRITICAL();
\r
1834 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
\r
1840 xReturn = pdFALSE;
\r
1843 taskEXIT_CRITICAL();
\r
1847 /*-----------------------------------------------------------*/
\r
1849 signed portBASE_TYPE xQueueIsQueueFullFromISR( const xQueueHandle xQueue )
\r
1851 signed portBASE_TYPE xReturn;
\r
1853 configASSERT( xQueue );
\r
1854 if( ( ( xQUEUE * ) xQueue )->uxMessagesWaiting == ( ( xQUEUE * ) xQueue )->uxLength )
\r
1860 xReturn = pdFALSE;
\r
1864 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
\r
1865 /*-----------------------------------------------------------*/
\r
1867 #if ( configUSE_CO_ROUTINES == 1 )
\r
1869 signed portBASE_TYPE xQueueCRSend( xQueueHandle xQueue, const void *pvItemToQueue, portTickType xTicksToWait )
\r
1871 signed portBASE_TYPE xReturn;
\r
1872 xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;
\r
1874 /* If the queue is already full we may have to block. A critical section
\r
1875 is required to prevent an interrupt removing something from the queue
\r
1876 between the check to see if the queue is full and blocking on the queue. */
\r
1877 portDISABLE_INTERRUPTS();
\r
1879 if( prvIsQueueFull( pxQueue ) != pdFALSE )
\r
		/* The queue is full - do we want to block or just leave without
		posting? */
		if( xTicksToWait > ( portTickType ) 0 )
\r
1885 /* As this is called from a coroutine we cannot block directly, but
\r
1886 return indicating that we need to block. */
\r
1887 vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
\r
1888 portENABLE_INTERRUPTS();
\r
1889 return errQUEUE_BLOCKED;
\r
1893 portENABLE_INTERRUPTS();
\r
1894 return errQUEUE_FULL;
\r
1898 portENABLE_INTERRUPTS();
\r
1900 portDISABLE_INTERRUPTS();
\r
1902 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
\r
1904 /* There is room in the queue, copy the data into the queue. */
\r
1905 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
\r
1908 /* Were any co-routines waiting for data to become available? */
\r
1909 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
\r
1911 /* In this instance the co-routine could be placed directly
\r
1912 into the ready list as we are within a critical section.
\r
1913 Instead the same pending ready list mechanism is used as if
\r
1914 the event were caused from within an interrupt. */
\r
1915 if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
\r
1917 /* The co-routine waiting has a higher priority so record
\r
1918 that a yield might be appropriate. */
\r
1919 xReturn = errQUEUE_YIELD;
\r
1923 mtCOVERAGE_TEST_MARKER();
\r
1928 mtCOVERAGE_TEST_MARKER();
\r
1933 xReturn = errQUEUE_FULL;
\r
1936 portENABLE_INTERRUPTS();
\r
1941 #endif /* configUSE_CO_ROUTINES */
\r
1942 /*-----------------------------------------------------------*/
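
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * application code does not normally call xQueueCRSend() directly; it uses the
 * crQUEUE_SEND() macro from croutine.h, which performs the block or yield when
 * errQUEUE_BLOCKED or errQUEUE_YIELD is returned.  The co-routine name and the
 * queue handle xCoRoutineQueue below are hypothetical.
 *
 * void vPostingCoRoutine( xCoRoutineHandle xHandle, unsigned portBASE_TYPE uxIndex )
 * {
 * static portBASE_TYPE xNumberToPost = 0;
 * static portBASE_TYPE xResult;
 *
 *     crSTART( xHandle );
 *
 *     for( ;; )
 *     {
 *         // Post to the queue, blocking for up to 10 ticks if it is full.
 *         crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, 10, &xResult );
 *
 *         if( xResult == pdPASS )
 *         {
 *             xNumberToPost++;
 *         }
 *     }
 *
 *     crEND();
 * }
 */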
#if ( configUSE_CO_ROUTINES == 1 )

	signed portBASE_TYPE xQueueCRReceive( xQueueHandle xQueue, void *pvBuffer, portTickType xTicksToWait )
	{
	signed portBASE_TYPE xReturn;
	xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( portTickType ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
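
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * as with sending, a co-routine receives through the crQUEUE_RECEIVE() macro
 * from croutine.h rather than by calling xQueueCRReceive() directly.  The
 * co-routine name and the queue handle xCoRoutineQueue are hypothetical.
 *
 * void vReceivingCoRoutine( xCoRoutineHandle xHandle, unsigned portBASE_TYPE uxIndex )
 * {
 * static portBASE_TYPE xReceivedValue;
 * static portBASE_TYPE xResult;
 *
 *     crSTART( xHandle );
 *
 *     for( ;; )
 *     {
 *         // Wait for up to 10 ticks for data to arrive on the queue.
 *         crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &xReceivedValue, 10, &xResult );
 *
 *         if( xResult == pdPASS )
 *         {
 *             // xReceivedValue now holds the value that was posted.
 *         }
 *     }
 *
 *     crEND();
 * }
 */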
#if ( configUSE_CO_ROUTINES == 1 )

	signed portBASE_TYPE xQueueCRSendFromISR( xQueueHandle xQueue, const void *pvItemToQueue, signed portBASE_TYPE xCoRoutinePreviouslyWoken )
	{
	xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
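
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * interrupt handlers use the crQUEUE_SEND_FROM_ISR() macro, which wraps
 * xQueueCRSendFromISR().  Passing the previous return value back in ensures
 * that at most one co-routine is woken per interrupt, however many items are
 * posted.  The handler name, the queue handle xCommsRxQueue and the driver
 * functions are hypothetical.
 *
 * void vUARTReceiveISR( void )
 * {
 * char cRxedChar;
 * portBASE_TYPE xCRWokenByPost = pdFALSE;
 *
 *     while( xUARTRxDataAvailable() != pdFALSE )
 *     {
 *         cRxedChar = ucReadUARTRxRegister();
 *         xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
 *     }
 * }
 */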
#if ( configUSE_CO_ROUTINES == 1 )

	signed portBASE_TYPE xQueueCRReceiveFromISR( xQueueHandle xQueue, void *pvBuffer, signed portBASE_TYPE *pxCoRoutineWoken )
	{
	signed portBASE_TYPE xReturn;
	xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;

		/* We cannot block from an ISR, so check there is data available.  If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
/*-----------------------------------------------------------*/
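
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * the mirror case - an interrupt handler draining a queue of characters that a
 * co-routine has posted for transmission uses the crQUEUE_RECEIVE_FROM_ISR()
 * macro, which wraps xQueueCRReceiveFromISR().  The handler name, the queue
 * handle xCommsTxQueue and the driver function are hypothetical.
 *
 * void vUARTTransmitISR( void )
 * {
 * char cCharToTx;
 * portBASE_TYPE xCRWokenByPost = pdFALSE;
 *
 *     // Keep transmitting while there is data waiting to be sent.
 *     while( crQUEUE_RECEIVE_FROM_ISR( xCommsTxQueue, &cCharToTx, &xCRWokenByPost ) == pdPASS )
 *     {
 *         vWriteUARTTxRegister( cCharToTx );
 *     }
 * }
 */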
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( xQueueHandle xQueue, signed char *pcQueueName )
	{
	unsigned portBASE_TYPE ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( unsigned portBASE_TYPE ) 0U; ux < ( unsigned portBASE_TYPE ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
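
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * adding a queue to the registry simply associates a text name with its handle
 * so that a kernel aware debugger can display it.  The handle xCommandQueue is
 * hypothetical; configQUEUE_REGISTRY_SIZE must be greater than zero in
 * FreeRTOSConfig.h for these functions to be built.
 *
 * xQueueHandle xCommandQueue;
 *
 * xCommandQueue = xQueueCreate( 10, sizeof( unsigned long ) );
 * vQueueAddToRegistry( xCommandQueue, ( signed char * ) "CmdQ" );
 *
 * // ... later, before deleting the queue ...
 * vQueueUnregisterQueue( xCommandQueue );
 * vQueueDelete( xCommandQueue );
 */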
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( xQueueHandle xQueue )
	{
	unsigned portBASE_TYPE ux;

		/* See if the handle of the queue being unregistered is actually in the
		registry. */
		for( ux = ( unsigned portBASE_TYPE ) 0U; ux < ( unsigned portBASE_TYPE ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot is free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
#if ( configUSE_TIMERS == 1 )

	void vQueueWaitForMessageRestricted( xQueueHandle xQueue, portTickType xTicksToWait )
	{
	xQUEUE * const pxQueue = ( xQUEUE * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	xQueueSetHandle xQueueCreateSet( unsigned portBASE_TYPE uxEventQueueLength )
	{
	xQueueSetHandle pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( xQUEUE * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	portBASE_TYPE xQueueAddToSet( xQueueSetMemberHandle xQueueOrSemaphore, xQueueSetHandle xQueueSet )
	{
	portBASE_TYPE xReturn;

		if( ( ( xQUEUE * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
		{
			/* Cannot add a queue/semaphore to more than one queue set. */
			xReturn = pdFAIL;
		}
		else if( ( ( xQUEUE * ) xQueueOrSemaphore )->uxMessagesWaiting != ( unsigned portBASE_TYPE ) 0 )
		{
			/* Cannot add a queue/semaphore to a queue set if there are already
			items in the queue/semaphore. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				( ( xQUEUE * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
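
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * a queue set must be dimensioned to hold one event for every item that can be
 * pending on its members at the same time, and members must be empty when they
 * are added.  The handles below are hypothetical; the counting semaphore is
 * created with an initial count of zero so that it is empty when added.
 *
 * #define mainQUEUE_LENGTH        10
 * #define mainSEMAPHORE_MAX_COUNT 1
 *
 * static xQueueSetHandle xQueueSet;
 * static xQueueHandle xQueue;
 * static xSemaphoreHandle xSemaphore;
 *
 * void vCreateQueueSet( void )
 * {
 *     xQueueSet = xQueueCreateSet( mainQUEUE_LENGTH + mainSEMAPHORE_MAX_COUNT );
 *     xQueue = xQueueCreate( mainQUEUE_LENGTH, sizeof( unsigned long ) );
 *     xSemaphore = xSemaphoreCreateCounting( mainSEMAPHORE_MAX_COUNT, 0 );
 *
 *     if( ( xQueueAddToSet( xQueue, xQueueSet ) != pdPASS ) ||
 *         ( xQueueAddToSet( xSemaphore, xQueueSet ) != pdPASS ) )
 *     {
 *         // A member was already in a set, or was not empty when added.
 *     }
 * }
 */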
#if ( configUSE_QUEUE_SETS == 1 )

	portBASE_TYPE xQueueRemoveFromSet( xQueueSetMemberHandle xQueueOrSemaphore, xQueueSetHandle xQueueSet )
	{
	portBASE_TYPE xReturn;
	xQUEUE * const pxQueueOrSemaphore = ( xQUEUE * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( unsigned portBASE_TYPE ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	xQueueSetMemberHandle xQueueSelectFromSet( xQueueSetHandle xQueueSet, portTickType xBlockTimeTicks )
	{
	xQueueSetMemberHandle xReturn = NULL;

		( void ) xQueueGenericReceive( ( xQueueHandle ) xQueueSet, &xReturn, xBlockTimeTicks, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
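
/*
 * Usage note (editor's illustrative sketch, not part of the original source):
 * a single task can then block on the set and read from whichever member is
 * signalled.  Because xQueueSelectFromSet() only returns a handle that already
 * holds data, the subsequent receive/take uses a block time of zero.  The
 * handles are the hypothetical ones from the sketch above.
 *
 * void vEventHandlingTask( void *pvParameters )
 * {
 * xQueueSetMemberHandle xActivatedMember;
 * unsigned long ulReceivedValue;
 *
 *     for( ;; )
 *     {
 *         // Block until one member of the set contains data or is available.
 *         xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );
 *
 *         if( xActivatedMember == xQueue )
 *         {
 *             xQueueReceive( xActivatedMember, &ulReceivedValue, 0 );
 *         }
 *         else if( xActivatedMember == xSemaphore )
 *         {
 *             xSemaphoreTake( xActivatedMember, 0 );
 *         }
 *     }
 * }
 */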
#if ( configUSE_QUEUE_SETS == 1 )

	xQueueSetMemberHandle xQueueSelectFromSetFromISR( xQueueSetHandle xQueueSet )
	{
	xQueueSetMemberHandle xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( xQueueHandle ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
/*-----------------------------------------------------------*/
#if ( configUSE_QUEUE_SETS == 1 )

	static portBASE_TYPE prvNotifyQueueSetContainer( const xQUEUE * const pxQueue, portBASE_TYPE xCopyPosition )
	{
	xQUEUE *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	portBASE_TYPE xReturn = pdFALSE;

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );
			/* The data copied is the handle of the queue that contains data. */
			prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
			if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority. */
					xReturn = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */