/* FreeRTOS/Source/queue.c (kernel queue implementation). */
1 /*\r
2     FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.\r
3     All rights reserved\r
4 \r
5     VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
6 \r
7     This file is part of the FreeRTOS distribution.\r
8 \r
9     FreeRTOS is free software; you can redistribute it and/or modify it under\r
10     the terms of the GNU General Public License (version 2) as published by the\r
11     Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.\r
12 \r
13     ***************************************************************************\r
14     >>!   NOTE: The modification to the GPL is included to allow you to     !<<\r
15     >>!   distribute a combined work that includes FreeRTOS without being   !<<\r
16     >>!   obliged to provide the source code for proprietary components     !<<\r
17     >>!   outside of the FreeRTOS kernel.                                   !<<\r
18     ***************************************************************************\r
19 \r
20     FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
21     WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
22     FOR A PARTICULAR PURPOSE.  Full license text is available on the following\r
23     link: http://www.freertos.org/a00114.html\r
24 \r
25     ***************************************************************************\r
26      *                                                                       *\r
27      *    FreeRTOS provides completely free yet professionally developed,    *\r
28      *    robust, strictly quality controlled, supported, and cross          *\r
29      *    platform software that is more than just the market leader, it     *\r
30      *    is the industry's de facto standard.                               *\r
31      *                                                                       *\r
32      *    Help yourself get started quickly while simultaneously helping     *\r
33      *    to support the FreeRTOS project by purchasing a FreeRTOS           *\r
34      *    tutorial book, reference manual, or both:                          *\r
35      *    http://www.FreeRTOS.org/Documentation                              *\r
36      *                                                                       *\r
37     ***************************************************************************\r
38 \r
39     http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading\r
40     the FAQ page "My application does not run, what could be wrong?".  Have you\r
41     defined configASSERT()?\r
42 \r
43     http://www.FreeRTOS.org/support - In return for receiving this top quality\r
44     embedded software for free we request you assist our global community by\r
45     participating in the support forum.\r
46 \r
47     http://www.FreeRTOS.org/training - Investing in training allows your team to\r
48     be as productive as possible as early as possible.  Now you can receive\r
49     FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers\r
50     Ltd, and the world's leading authority on the world's leading RTOS.\r
51 \r
52     http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
53     including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
54     compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
55 \r
56     http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.\r
57     Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.\r
58 \r
59     http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High\r
60     Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS\r
61     licenses offer ticketed support, indemnification and commercial middleware.\r
62 \r
63     http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
64     engineered and independently SIL3 certified version for use in safety and\r
65     mission critical applications that require provable dependability.\r
66 \r
67     1 tab == 4 spaces!\r
68 */\r
69 \r
70 #include <stdlib.h>\r
71 #include <string.h>\r
72 \r
73 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining\r
74 all the API functions to use the MPU wrappers.  That should only be done when\r
75 task.h is included from an application file. */\r
76 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE\r
77 \r
78 #include "FreeRTOS.h"\r
79 #include "task.h"\r
80 #include "queue.h"\r
81 \r
82 #if ( configUSE_CO_ROUTINES == 1 )\r
83         #include "croutine.h"\r
84 #endif\r
85 \r
86 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the\r
87 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the\r
88 header files above, but not in this file, in order to generate the correct\r
89 privileged Vs unprivileged linkage and placement. */\r
90 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */\r
91 \r
92 \r
/* Constants used with the xRxLock and xTxLock structure members.  A value of
queueUNLOCKED means the queue is not locked; once locked the member holds a
count (>= queueLOCKED_UNMODIFIED) of items an ISR added to or removed from
the queue while it was locked. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
/* Giving a mutex never blocks, so a block time of zero is used. */
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
124 \r
/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 *
 * The same structure also backs mutexes and semaphores - see the
 * pxMutexHolder/uxQueueType aliases defined above for the dual use of the
 * pcHead and pcTail members.
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area.  NULL (queueQUEUE_IS_MUTEX) when the structure is used as a mutex. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the free next place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each items that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;	/*< Used by kernel aware debuggers and trace tools only. */
		uint8_t ucQueueType;		/*< Records whether this is a queue, mutex, semaphore, etc. - trace facility only. */
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer; /*< The queue set this queue belongs to, or NULL. */
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;
166 \r
167 /*-----------------------------------------------------------*/\r
168 \r
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new xQueueRegistryItem name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
195 \r
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue in unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.  Callers assign the return value to a 'yield required'
 * flag - see the use in xQueueGenericSend().
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue into the caller supplied buffer.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
238 \r
239 /*-----------------------------------------------------------*/\r
240 \r
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.  Both lock members are moved from
 * queueUNLOCKED to queueLOCKED_UNMODIFIED (counts already in progress are
 * left alone); ISRs then increment the counts instead of touching the event
 * lists, and prvUnlockQueue() later inspects the counts and performs any
 * deferred unblocking.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
258 /*-----------------------------------------------------------*/\r
259 \r
/*
 * Return a queue to its empty state.  When xNewQueue is pdTRUE the queue is
 * being initialised for the first time and its event lists are initialised;
 * when pdFALSE the event lists are preserved and one blocked sender (if any)
 * is unblocked, as the queue is now empty and writable.  Always returns
 * pdPASS (kept for calling semantic consistency with previous versions).
 */
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		/* Reset the storage pointers: pcTail marks the end of the storage
		area, writes restart at pcHead, and pcReadFrom is set to the last
		item slot so the next read wraps around to pcHead.  Both lock counts
		return to the unlocked state. */
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					/* The unblocked task requires a context switch - only
					performed when preemption is enabled. */
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
311 /*-----------------------------------------------------------*/\r
312 \r
313 QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )\r
314 {\r
315 Queue_t *pxNewQueue;\r
316 size_t xQueueSizeInBytes;\r
317 QueueHandle_t xReturn = NULL;\r
318 \r
319         /* Remove compiler warnings about unused parameters should\r
320         configUSE_TRACE_FACILITY not be set to 1. */\r
321         ( void ) ucQueueType;\r
322 \r
323         configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
324 \r
325         if( uxItemSize == ( UBaseType_t ) 0 )\r
326         {\r
327                 /* There is not going to be a queue storage area. */\r
328                 xQueueSizeInBytes = ( size_t ) 0;\r
329         }\r
330         else\r
331         {\r
332                 /* The queue is one byte longer than asked for to make wrap checking\r
333                 easier/faster. */\r
334                 xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
335         }\r
336 \r
337         /* Allocate the new queue structure and storage area. */\r
338         pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );\r
339 \r
340         if( pxNewQueue != NULL )\r
341         {\r
342                 if( uxItemSize == ( UBaseType_t ) 0 )\r
343                 {\r
344                         /* No RAM was allocated for the queue storage area, but PC head\r
345                         cannot be set to NULL because NULL is used as a key to say the queue\r
346                         is used as a mutex.  Therefore just set pcHead to point to the queue\r
347                         as a benign value that is known to be within the memory map. */\r
348                         pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;\r
349                 }\r
350                 else\r
351                 {\r
352                         /* Jump past the queue structure to find the location of the queue\r
353                         storage area. */\r
354                         pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );\r
355                 }\r
356 \r
357                 /* Initialise the queue members as described above where the queue type\r
358                 is defined. */\r
359                 pxNewQueue->uxLength = uxQueueLength;\r
360                 pxNewQueue->uxItemSize = uxItemSize;\r
361                 ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
362 \r
363                 #if ( configUSE_TRACE_FACILITY == 1 )\r
364                 {\r
365                         pxNewQueue->ucQueueType = ucQueueType;\r
366                 }\r
367                 #endif /* configUSE_TRACE_FACILITY */\r
368 \r
369                 #if( configUSE_QUEUE_SETS == 1 )\r
370                 {\r
371                         pxNewQueue->pxQueueSetContainer = NULL;\r
372                 }\r
373                 #endif /* configUSE_QUEUE_SETS */\r
374 \r
375                 traceQUEUE_CREATE( pxNewQueue );\r
376                 xReturn = pxNewQueue;\r
377         }\r
378         else\r
379         {\r
380                 mtCOVERAGE_TEST_MARKER();\r
381         }\r
382 \r
383         configASSERT( xReturn );\r
384 \r
385         return xReturn;\r
386 }\r
387 /*-----------------------------------------------------------*/\r
388 \r
#if ( configUSE_MUTEXES == 1 )

	/*
	 * Create a queue configured for use as a mutex: length one, item size
	 * zero, with the pcHead/pcTail members re-purposed (via the
	 * uxQueueType/pxMutexHolder aliases) to mark the structure as a mutex
	 * and to record the holding task.  The mutex starts in the 'available'
	 * state via the xQueueGenericSend() call at the end.  Returns NULL if
	 * the heap allocation fails.
	 */
	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance.  pxMutexHolder
			aliases pcTail, and setting uxQueueType (pcHead) to NULL is the
			marker that identifies this structure as a mutex. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* Queues used as a mutex no data is actually copied into or out
			of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state.  The send
			'gives' the mutex, making it immediately available to takers. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
451 /*-----------------------------------------------------------*/\r
452 \r
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	/*
	 * Return the holder recorded in a mutex-type queue, or NULL when the
	 * queue is not a mutex or has no holder.  This function is called by
	 * xSemaphoreGetMutexHolder(), and should not be called directly.  Note
	 * the holder can change as soon as the critical section below is exited,
	 * so the result is only dependable for a task checking whether it is
	 * itself the holder.
	 */
	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pvReturn;
	Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

		taskENTER_CRITICAL();
		{
			/* pcHead (aliased uxQueueType) is NULL only for mutexes, in
			which case pcTail (aliased pxMutexHolder) records the holder. */
			pvReturn = ( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) ? ( void * ) pxSemaphore->pxMutexHolder : NULL;
		}
		taskEXIT_CRITICAL();

		return pvReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
481 /*-----------------------------------------------------------*/\r
482 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * Release one level of a recursive mutex.  Only the holding task may
	 * give the mutex, and the mutex itself is only returned (via
	 * xQueueGenericSend()) once the recursive call count unwinds to zero.
	 * Returns pdPASS on success, or pdFAIL if the calling task is not the
	 * current holder.
	 */
	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
536 /*-----------------------------------------------------------*/\r
537 \r
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/*
	 * Obtain a recursive mutex, blocking for up to xTicksToWait ticks if it
	 * is held by another task.  If the calling task already holds the mutex
	 * the recursive call count is simply incremented without blocking.
	 * Returns pdPASS when the mutex is obtained (or was already held by the
	 * caller), otherwise pdFAIL.
	 */
	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			/* Already the holder - just record one more level of nesting. */
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			/* Not the holder - take the underlying queue, blocking for up to
			xTicksToWait if necessary. */
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
578 /*-----------------------------------------------------------*/\r
579 \r
#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	/*
	 * Create a counting semaphore as a queue of uxMaxCount zero-byte items,
	 * pre-loaded so that uxInitialCount 'takes' succeed immediately.
	 * Returns the semaphore handle, or NULL if the underlying queue could
	 * not be created.
	 */
	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xSemaphore;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xSemaphore = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xSemaphore == NULL )
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}
		else
		{
			/* Set the initial count directly - no data is copied for
			semaphore queues, only uxMessagesWaiting matters. */
			( ( Queue_t * ) xSemaphore )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}

		configASSERT( xSemaphore );
		return xSemaphore;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
607 /*-----------------------------------------------------------*/\r
608 \r
/*
 * Send an item to a queue.  The item pointed to by pvItemToQueue is copied
 * into the queue (by prvCopyDataToQueue()) rather than referenced, so the
 * caller's buffer can be reused as soon as this function returns.  If the
 * queue is full the calling task blocks for at most xTicksToWait ticks
 * waiting for space (or indefinitely if INCLUDE_vTaskSuspend allows it -
 * TODO confirm against vTaskPlaceOnEventList()).  xCopyPosition selects
 * queueSEND_TO_BACK / queueSEND_TO_FRONT / queueOVERWRITE behaviour.
 *
 * Returns pdPASS if the item was queued, errQUEUE_FULL if the queue remained
 * full for the whole block time.  Not for use from interrupts - the ISR-safe
 * variant is xQueueGenericSendFromISR().
 */
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL item is only legitimate when the queue holds zero-size items
	(e.g. when the queue is being used as a semaphore). */
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	/* queueOVERWRITE is only intended for use with queues of length one. */
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* It is invalid to block (non-zero xTicksToWait) while the scheduler
		is suspended, as the task could never be unblocked. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				/* prvCopyDataToQueue() returns pdTRUE when a yield is needed
				(used below for the mutex-give special case). */
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		/* Suspend the scheduler and lock the queue so the timeout check and
		the decision to block are made atomically with respect to other tasks
		and to queue events posted from interrupts. */
		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can effect the
				event list.  It is possible	that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready last instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
805 /*-----------------------------------------------------------*/\r
806 \r
807 #if ( configUSE_ALTERNATIVE_API == 1 )\r
808 \r
	/*
	 * Simplified "alternative API" version of xQueueGenericSend().  Unlike
	 * the generic version it uses only critical sections (no queue locking /
	 * scheduler suspension), which keeps the code smaller at the cost of
	 * longer interrupt-disabled periods.  Only compiled in when
	 * configUSE_ALTERNATIVE_API is 1.
	 *
	 * Copies *pvItemToQueue into the queue, blocking for up to xTicksToWait
	 * ticks if the queue is full.  Returns pdPASS on success or
	 * errQUEUE_FULL on timeout.
	 */
	BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		/* A NULL item is only legitimate when the queue holds zero-size
		items. */
		configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				/* Is there room on the queue now?  To be running we must be
				the highest priority task wanting to access the queue. */
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					traceQUEUE_SEND( pxQueue );
					prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately. */
							portYIELD_WITHIN_API();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* Queue full and the caller does not want to block. */
						taskEXIT_CRITICAL();
						return errQUEUE_FULL;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First time round the loop with the queue full -
						capture the entry time so the timeout can be checked
						on subsequent iterations. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueFull( pxQueue ) != pdFALSE )
					{
						/* Still full and still within the block time - block
						on the queue's "waiting to send" event list. */
						traceBLOCKING_ON_QUEUE_SEND( pxQueue );
						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time expired without space becoming
					available. */
					taskEXIT_CRITICAL();
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}
	}
893 \r
894 #endif /* configUSE_ALTERNATIVE_API */\r
895 /*-----------------------------------------------------------*/\r
896 \r
897 #if ( configUSE_ALTERNATIVE_API == 1 )\r
898 \r
	/*
	 * Simplified "alternative API" version of the queue receive function,
	 * compiled in only when configUSE_ALTERNATIVE_API is 1.  It uses only
	 * critical sections rather than the queue-locking scheme of the generic
	 * API.
	 *
	 * Copies the next item from the queue into pvBuffer, blocking for up to
	 * xTicksToWait ticks if the queue is empty.  When xJustPeeking is
	 * non-zero the item is left on the queue (the read pointer is restored).
	 * Returns pdPASS if an item was received, errQUEUE_EMPTY on timeout.
	 */
	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		/* A NULL buffer is only legitimate when the queue holds zero-size
		items (e.g. a semaphore or mutex). */
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						/* Removing an item creates space - unblock any task
						waiting to send. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						/* Queue empty and the caller does not want to block. */
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						/* First time round the loop with the queue empty -
						capture the entry time so the timeout can be checked
						on subsequent iterations. */
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Blocking on a mutex - raise the priority of
								the current holder to at least this task's
								priority (priority inheritance).  NOTE(review):
								taskENTER_CRITICAL() is already in effect here;
								this nests a second critical section - confirm
								the port supports nesting. */
								taskENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								taskEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The block time expired without data arriving. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}
1047 \r
1048 \r
1049 #endif /* configUSE_ALTERNATIVE_API */\r
1050 /*-----------------------------------------------------------*/\r
1051 \r
1052 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
1053 {\r
1054 BaseType_t xReturn;\r
1055 UBaseType_t uxSavedInterruptStatus;\r
1056 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1057 \r
1058         configASSERT( pxQueue );\r
1059         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1060         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
1061 \r
1062         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1063         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1064         above the maximum system call priority are kept permanently enabled, even\r
1065         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1066         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1067         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1068         failure if a FreeRTOS API function is called from an interrupt that has been\r
1069         assigned a priority above the configured maximum system call priority.\r
1070         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1071         that have been assigned a priority at or (logically) below the maximum\r
1072         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1073         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1074         More information (albeit Cortex-M specific) is provided on the following\r
1075         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1076         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1077 \r
1078         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
1079         in the queue.  Also don't directly wake a task that was blocked on a queue\r
1080         read, instead return a flag to say whether a context switch is required or\r
1081         not (i.e. has a task with a higher priority than us been woken by this\r
1082         post). */\r
1083         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1084         {\r
1085                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
1086                 {\r
1087                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1088 \r
1089                         /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a\r
1090                         semaphore or mutex.  That means prvCopyDataToQueue() cannot result\r
1091                         in a task disinheriting a priority and prvCopyDataToQueue() can be\r
1092                         called here even though the disinherit function does not check if\r
1093                         the scheduler is suspended before accessing the ready lists. */\r
1094                         ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1095 \r
1096                         /* The event list is not altered if the queue is locked.  This will\r
1097                         be done when the queue is unlocked later. */\r
1098                         if( pxQueue->xTxLock == queueUNLOCKED )\r
1099                         {\r
1100                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1101                                 {\r
1102                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1103                                         {\r
1104                                                 if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )\r
1105                                                 {\r
1106                                                         /* The queue is a member of a queue set, and posting\r
1107                                                         to the queue set caused a higher priority task to\r
1108                                                         unblock.  A context switch is required. */\r
1109                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1110                                                         {\r
1111                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1112                                                         }\r
1113                                                         else\r
1114                                                         {\r
1115                                                                 mtCOVERAGE_TEST_MARKER();\r
1116                                                         }\r
1117                                                 }\r
1118                                                 else\r
1119                                                 {\r
1120                                                         mtCOVERAGE_TEST_MARKER();\r
1121                                                 }\r
1122                                         }\r
1123                                         else\r
1124                                         {\r
1125                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1126                                                 {\r
1127                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1128                                                         {\r
1129                                                                 /* The task waiting has a higher priority so\r
1130                                                                 record that a context switch is required. */\r
1131                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1132                                                                 {\r
1133                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1134                                                                 }\r
1135                                                                 else\r
1136                                                                 {\r
1137                                                                         mtCOVERAGE_TEST_MARKER();\r
1138                                                                 }\r
1139                                                         }\r
1140                                                         else\r
1141                                                         {\r
1142                                                                 mtCOVERAGE_TEST_MARKER();\r
1143                                                         }\r
1144                                                 }\r
1145                                                 else\r
1146                                                 {\r
1147                                                         mtCOVERAGE_TEST_MARKER();\r
1148                                                 }\r
1149                                         }\r
1150                                 }\r
1151                                 #else /* configUSE_QUEUE_SETS */\r
1152                                 {\r
1153                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1154                                         {\r
1155                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1156                                                 {\r
1157                                                         /* The task waiting has a higher priority so record that a\r
1158                                                         context switch is required. */\r
1159                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1160                                                         {\r
1161                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1162                                                         }\r
1163                                                         else\r
1164                                                         {\r
1165                                                                 mtCOVERAGE_TEST_MARKER();\r
1166                                                         }\r
1167                                                 }\r
1168                                                 else\r
1169                                                 {\r
1170                                                         mtCOVERAGE_TEST_MARKER();\r
1171                                                 }\r
1172                                         }\r
1173                                         else\r
1174                                         {\r
1175                                                 mtCOVERAGE_TEST_MARKER();\r
1176                                         }\r
1177                                 }\r
1178                                 #endif /* configUSE_QUEUE_SETS */\r
1179                         }\r
1180                         else\r
1181                         {\r
1182                                 /* Increment the lock count so the task that unlocks the queue\r
1183                                 knows that data was posted while it was locked. */\r
1184                                 ++( pxQueue->xTxLock );\r
1185                         }\r
1186 \r
1187                         xReturn = pdPASS;\r
1188                 }\r
1189                 else\r
1190                 {\r
1191                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1192                         xReturn = errQUEUE_FULL;\r
1193                 }\r
1194         }\r
1195         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1196 \r
1197         return xReturn;\r
1198 }\r
1199 /*-----------------------------------------------------------*/\r
1200 \r
/*
 * Give a semaphore from an interrupt service routine.
 *
 * Equivalent to xQueueGenericSendFromISR() but specialised for queues used as
 * semaphores (item size 0): no data is copied, only the count of available
 * "messages" (semaphore tokens) is incremented.
 *
 * xQueue - handle of the semaphore's underlying queue.  Must have an item
 * size of zero (asserted below).
 * pxHigherPriorityTaskWoken - optional out parameter; set to pdTRUE if the
 * give unblocked a task of higher priority than the interrupted task, in
 * which case the ISR should request a context switch before exiting.  May be
 * NULL if the caller does not care.
 *
 * Returns pdPASS if the token was given, or errQUEUE_FULL if the semaphore
 * count was already at its maximum (uxLength).
 */
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for an
	interrupts, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Mask interrupts (up to the port's maximum system call priority) so the
	queue state cannot be changed by another FromISR call while it is being
	updated here.  The previous mask is restored on exit. */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			++( pxQueue->uxMessagesWaiting );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
						{
							/* The semaphore is a member of a queue set, and
							posting to the queue set caused a higher priority
							task to unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* Not in a queue set - wake the highest priority task
						(if any) blocked waiting to take this semaphore. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			/* Semaphore count already at its maximum - nothing to give. */
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1360 /*-----------------------------------------------------------*/\r
1361 \r
/*
 * Receive (or peek) an item from a queue, optionally blocking.
 *
 * xQueue - handle of the queue to read from.
 * pvBuffer - buffer the received item is copied into.  May only be NULL when
 * the queue's item size is zero (asserted below).
 * xTicksToWait - maximum time to block waiting for data; 0 means return
 * immediately with errQUEUE_EMPTY if the queue is empty.
 * xJustPeeking - when pdTRUE the item is copied out but left on the queue
 * (the read pointer is restored); when pdFALSE the item is removed.
 *
 * Returns pdPASS if an item was received/peeked, errQUEUE_EMPTY otherwise.
 *
 * The function loops: each pass first checks for data inside a critical
 * section, and if none is available (and time remains) blocks the calling
 * task on the queue's "waiting to receive" event list, using the
 * suspend-scheduler + lock-queue protocol so ISRs that post meanwhile are
 * accounted for when the queue is unlocked.
 */
BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL buffer is only legal when there is no data to copy (item size 0,
	i.e. the queue is a semaphore/mutex). */
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* Cannot block while the scheduler is suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */

	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif /* configUSE_MUTEXES */

					/* Removing an item frees a slot, so unblock the highest
					priority task (if any) waiting to send to this queue. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		/* Suspend the scheduler and lock the queue before deciding whether to
		block: ISRs can still post, but the lock counters record it so the
		unlock can replay any missed event-list updates. */
		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						/* About to block on a mutex - raise the holder's
						priority to at least this task's priority so it cannot
						be starved (priority inheritance). */
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					/* Resuming the scheduler did not yield, so request the
					switch explicitly - this task is now blocked. */
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* Timed out.  If data arrived in the meantime, loop back and take
			it; otherwise give up. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceQUEUE_RECEIVE_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
}
1555 /*-----------------------------------------------------------*/\r
1556 \r
/*
 * Receive an item from a queue from within an interrupt service routine.
 *
 * xQueue - handle of the queue to read from.
 * pvBuffer - buffer the received item is copied into.  May only be NULL when
 * the queue's item size is zero (asserted below).
 * pxHigherPriorityTaskWoken - optional out parameter; set to pdTRUE if
 * removing the item unblocked a sender of higher priority than the
 * interrupted task, in which case the ISR should request a context switch
 * before exiting.  May be NULL.
 *
 * Returns pdPASS if an item was received, pdFAIL if the queue was empty.
 * Never blocks - an ISR cannot wait.
 */
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	/* A NULL buffer is only legal when there is no data to copy (item size
	0). */
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Mask interrupts (up to the port's maximum system call priority) so the
	queue state cannot be changed by another FromISR call while it is being
	updated here.  The previous mask is restored on exit. */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				/* Removing an item frees a slot - wake the highest priority
				task (if any) blocked waiting to send to this queue. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			/* Nothing to receive - fail immediately rather than block. */
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1642 /*-----------------------------------------------------------*/\r
1643 \r
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Copy the item at the front of the queue into pvBuffer WITHOUT removing
	it from the queue.  Safe to call from an interrupt service routine.
	Returns pdPASS if an item was copied, pdFAIL if the queue was empty.
	Because nothing is removed no task can be unblocked, hence there is no
	pxHigherPriorityTaskWoken parameter and the queue lock counts are not
	touched. */
	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts above
	that priority must never call a FreeRTOS API function - including the
	FromISR functions.  If configASSERT() is defined this macro asserts that
	the current interrupt priority is valid for making FreeRTOS API calls.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* prvCopyDataFromQueue() advances the read pointer before
			copying, so remember the read position and restore it afterwards
			as nothing is actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1696 /*-----------------------------------------------------------*/\r
1697 \r
1698 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1699 {\r
1700 UBaseType_t uxReturn;\r
1701 \r
1702         configASSERT( xQueue );\r
1703 \r
1704         taskENTER_CRITICAL();\r
1705         {\r
1706                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1707         }\r
1708         taskEXIT_CRITICAL();\r
1709 \r
1710         return uxReturn;\r
1711 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1712 /*-----------------------------------------------------------*/\r
1713 \r
1714 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1715 {\r
1716 UBaseType_t uxReturn;\r
1717 Queue_t *pxQueue;\r
1718 \r
1719         pxQueue = ( Queue_t * ) xQueue;\r
1720         configASSERT( pxQueue );\r
1721 \r
1722         taskENTER_CRITICAL();\r
1723         {\r
1724                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1725         }\r
1726         taskEXIT_CRITICAL();\r
1727 \r
1728         return uxReturn;\r
1729 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1730 /*-----------------------------------------------------------*/\r
1731 \r
1732 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1733 {\r
1734 UBaseType_t uxReturn;\r
1735 \r
1736         configASSERT( xQueue );\r
1737 \r
1738         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1739 \r
1740         return uxReturn;\r
1741 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1742 /*-----------------------------------------------------------*/\r
1743 \r
1744 void vQueueDelete( QueueHandle_t xQueue )\r
1745 {\r
1746 Queue_t * const pxQueue = ( Queue_t * ) xQueue;\r
1747 \r
1748         configASSERT( pxQueue );\r
1749 \r
1750         traceQUEUE_DELETE( pxQueue );\r
1751         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1752         {\r
1753                 vQueueUnregisterQueue( pxQueue );\r
1754         }\r
1755         #endif\r
1756         vPortFree( pxQueue );\r
1757 }\r
1758 /*-----------------------------------------------------------*/\r
1759 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Trace-facility accessor: return the number previously assigned to
		the queue by vQueueSetQueueNumber(). */
		return pxQueue->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1768 /*-----------------------------------------------------------*/\r
1769 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Trace-facility mutator: tag the queue with a number so trace tools
		can identify it.  Not used by the kernel itself. */
		pxQueue->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1778 /*-----------------------------------------------------------*/\r
1779 \r
#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Trace-facility accessor: return the type code recorded when the
		queue was created (queue, mutex, counting semaphore, etc.). */
		return pxQueue->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
1788 /*-----------------------------------------------------------*/\r
1789 \r
/* Copy pvItemToQueue into the queue's storage area.  xPosition selects where:
queueSEND_TO_BACK copies to the write pointer, queueSEND_TO_FRONT copies to
the read pointer (so the item is received next), and queueOVERWRITE behaves as
send-to-front but leaves the recorded item count unchanged when the queue
already held an item.  When uxItemSize is zero (the queue is being used as a
semaphore or mutex) no data is copied; if it is a mutex the mutex is being
given back, so priority disinheritance is performed.  Returns the result of
xTaskPriorityDisinherit() in that mutex case, otherwise pdFALSE.
NOTE(review): this function updates shared queue state without taking a
critical section itself - callers appear to provide mutual exclusion; confirm
at call sites. */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		/* Copy to the back, then advance the write pointer, wrapping back to
		the start of the storage area when the end is passed. */
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		/* Copy to the front: write at the current read position, then move
		the read pointer backwards, wrapping to the last item slot when it
		falls before the start of the storage area. */
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

	++( pxQueue->uxMessagesWaiting );

	return xReturn;
}
1862 /*-----------------------------------------------------------*/\r
1863 \r
/* Copy the item at the front of the queue into pvBuffer.  Note the read
pointer is advanced (with wrap-around) BEFORE the copy, leaving u.pcReadFrom
pointing at the item just copied - the peek functions rely on this by saving
and restoring the pointer around the call.  Nothing is copied when uxItemSize
is zero (the queue is a semaphore or mutex).  The message count is NOT
modified here; callers adjust uxMessagesWaiting themselves. */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
	{
		/* Advance to the next item slot, wrapping back to the start of the
		storage area when the end is passed. */
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
}
1880 /*-----------------------------------------------------------*/\r
1881 \r
/* Re-enable event-list updates on a queue that was locked with
prvLockQueue().  While the queue was locked ISRs could still add or remove
data, but could not touch the event lists; instead they incremented the Tx/Rx
lock counts.  This function drains those counts, unblocking one waiting task
per counted event, then marks both locks queueUNLOCKED. */
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */

	/* The lock counts contains the number of extra data items placed or
	removed from the queue while the queue was locked.  When a queue is
	locked items can be added or removed, but the event lists cannot be
	updated. */
	taskENTER_CRITICAL();
	{
		/* See if data was added to the queue while it was locked. */
		while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			#if ( configUSE_QUEUE_SETS == 1 )
			{
				if( pxQueue->pxQueueSetContainer != NULL )
				{
					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
					{
						/* The queue is a member of a queue set, and posting to
						the queue set caused a higher priority task to unblock.
						A context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* Tasks that are removed from the event list will get added to
					the pending ready list as the scheduler is still suspended. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							vTaskMissedYield();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* No tasks are waiting - no point consuming the
						remaining lock count. */
						break;
					}
				}
			}
			#else /* configUSE_QUEUE_SETS */
			{
				/* Tasks that are removed from the event list will get added to
				the pending ready list as the scheduler is still suspended. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority so record that
						a context switch is required. */
						vTaskMissedYield();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* No tasks are waiting - no point consuming the remaining
					lock count. */
					break;
				}
			}
			#endif /* configUSE_QUEUE_SETS */

			--( pxQueue->xTxLock );
		}

		pxQueue->xTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL();
	{
		while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
		{
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				--( pxQueue->xRxLock );
			}
			else
			{
				break;
			}
		}

		pxQueue->xRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL();
}
1995 /*-----------------------------------------------------------*/\r
1996 \r
1997 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
1998 {\r
1999 BaseType_t xReturn;\r
2000 \r
2001         taskENTER_CRITICAL();\r
2002         {\r
2003                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t )  0 )\r
2004                 {\r
2005                         xReturn = pdTRUE;\r
2006                 }\r
2007                 else\r
2008                 {\r
2009                         xReturn = pdFALSE;\r
2010                 }\r
2011         }\r
2012         taskEXIT_CRITICAL();\r
2013 \r
2014         return xReturn;\r
2015 }\r
2016 /*-----------------------------------------------------------*/\r
2017 \r
2018 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
2019 {\r
2020 BaseType_t xReturn;\r
2021 \r
2022         configASSERT( xQueue );\r
2023         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2024         {\r
2025                 xReturn = pdTRUE;\r
2026         }\r
2027         else\r
2028         {\r
2029                 xReturn = pdFALSE;\r
2030         }\r
2031 \r
2032         return xReturn;\r
2033 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2034 /*-----------------------------------------------------------*/\r
2035 \r
2036 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
2037 {\r
2038 BaseType_t xReturn;\r
2039 \r
2040         taskENTER_CRITICAL();\r
2041         {\r
2042                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2043                 {\r
2044                         xReturn = pdTRUE;\r
2045                 }\r
2046                 else\r
2047                 {\r
2048                         xReturn = pdFALSE;\r
2049                 }\r
2050         }\r
2051         taskEXIT_CRITICAL();\r
2052 \r
2053         return xReturn;\r
2054 }\r
2055 /*-----------------------------------------------------------*/\r
2056 \r
2057 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2058 {\r
2059 BaseType_t xReturn;\r
2060 \r
2061         configASSERT( xQueue );\r
2062         if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )\r
2063         {\r
2064                 xReturn = pdTRUE;\r
2065         }\r
2066         else\r
2067         {\r
2068                 xReturn = pdFALSE;\r
2069         }\r
2070 \r
2071         return xReturn;\r
2072 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2073 /*-----------------------------------------------------------*/\r
2074 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue send.  A co-routine cannot block inside
	an API call, so when the queue is full and xTicksToWait is non-zero the
	co-routine is placed on the queue's "waiting to send" event list and
	errQUEUE_BLOCKED is returned so the co-routine implementation can yield.
	Returns pdPASS on success, errQUEUE_YIELD if the send woke a higher
	priority co-routine, or errQUEUE_FULL. */
	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were re-enabled above, so the queue state may have
		changed - re-check for space inside a fresh critical section before
		copying the data in. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2150 /*-----------------------------------------------------------*/\r
2151 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of a queue receive.  A co-routine cannot block
	inside an API call, so when the queue is empty and xTicksToWait is
	non-zero the co-routine is placed on the queue's "waiting to receive"
	event list and errQUEUE_BLOCKED is returned so the co-routine
	implementation can yield.  Returns pdPASS on success, errQUEUE_YIELD if
	removing the item woke a higher priority co-routine, pdFAIL if the queue
	turned out to be empty on the second check, or errQUEUE_FULL when the
	queue is empty and no block time was specified (NOTE(review): the
	constant name is misleading here, but it is the established return value
	for this API so it is retained for compatibility). */
	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		/* Interrupts were re-enabled above, so the queue state may have
		changed - re-check for data inside a fresh critical section before
		copying an item out. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue.  The read pointer is
				advanced (with wrap-around) before the copy, matching
				prvCopyDataFromQueue(). */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				/* Cast the copy length to size_t, not unsigned, for
				consistency with the other memcpy() calls in this file and to
				avoid a narrowing conversion on ports where UBaseType_t is
				wider than unsigned int. */
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2240 /*-----------------------------------------------------------*/\r
2241 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine send.  Posts pvItemToQueue to the back of the
	queue if there is space; otherwise the item is silently dropped (an ISR
	cannot block).  Only one co-routine is woken per ISR: if
	xCoRoutinePreviouslyWoken is pdFALSE and a co-routine was waiting for
	data, pdTRUE is returned to indicate one was woken; otherwise the value
	of xCoRoutinePreviouslyWoken is passed straight back so the caller can
	chain multiple sends from the same ISR. */
	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
2288 /*-----------------------------------------------------------*/\r
2289 \r
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine version of xQueueReceiveFromISR().  Copies an item out of
	the queue into pvBuffer from within an interrupt.

	*pxCoRoutineWoken should be initialised to pdFALSE by the caller before
	the first call made from an interrupt; it is set to pdTRUE if a
	co-routine blocked waiting to send is woken, limiting the wakes to one
	per interrupt across repeated calls.

	Returns pdPASS if an item was received, otherwise pdFAIL. */
	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue.  pcReadFrom points at the item
			read previously, so it is advanced first - wrapping back to the
			start of the storage area if it passes the end - and only then is
			the item copied out. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			/* Wake at most one co-routine per interrupt. */
			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* Record the wake so later calls made from the same
						interrupt do not wake another co-routine. */
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2348 /*-----------------------------------------------------------*/\r
2349 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t uxSlot;

		/* Scan the registry for the first unused slot - a slot is free when
		its name pointer is NULL. */
		for( uxSlot = ( UBaseType_t ) 0U; uxSlot < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; uxSlot++ )
		{
			if( xQueueRegistry[ uxSlot ].pcQueueName != NULL )
			{
				/* Slot already in use - keep searching. */
				mtCOVERAGE_TEST_MARKER();
			}
			else
			{
				/* Record the queue's details in the free slot just found. */
				xQueueRegistry[ uxSlot ].pcQueueName = pcQueueName;
				xQueueRegistry[ uxSlot ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
2377 /*-----------------------------------------------------------*/\r
2378 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	const char *pcQueueGetQueueName( QueueHandle_t xQueue )
	{
	UBaseType_t uxSlot = ( UBaseType_t ) 0U;
	const char *pcName = NULL;

		/* Linear scan of the registry for the given handle.  Note there is
		nothing here to protect against another task adding or removing
		entries from the registry while it is being searched. */
		while( uxSlot < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE )
		{
			if( xQueueRegistry[ uxSlot ].xHandle == xQueue )
			{
				pcName = xQueueRegistry[ uxSlot ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			uxSlot++;
		}

		/* NULL if the handle was not found in the registry. */
		return pcName;
	}

#endif /* configQUEUE_REGISTRY_SIZE */
2405 /*-----------------------------------------------------------*/\r
2406 \r
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t uxSlot;

		/* Search the registry for the slot, if any, that holds the queue
		being unregistered. */
		for( uxSlot = ( UBaseType_t ) 0U; uxSlot < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; uxSlot++ )
		{
			if( xQueueRegistry[ uxSlot ].xHandle != xQueue )
			{
				/* Not this slot - keep looking. */
				mtCOVERAGE_TEST_MARKER();
				continue;
			}

			/* Mark the slot free again by clearing its name.  The handle is
			also cleared so the same queue handle cannot appear in the
			registry twice if it is added, removed, then added again. */
			xQueueRegistry[ uxSlot ].pcQueueName = NULL;
			xQueueRegistry[ uxSlot ].xHandle = ( QueueHandle_t ) 0;
			break;
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
2437 /*-----------------------------------------------------------*/\r
2438 \r
#if ( configUSE_TIMERS == 1 )

	/* Places the calling task on the queue's "waiting to receive" event list
	if the queue is empty, without actually blocking - the block takes effect
	when the scheduler is next unlocked.

	xTicksToWait - the block period; xWaitIndefinitely - when pdTRUE the task
	waits with no timeout (both are passed straight through to
	vTaskPlaceOnEventListRestricted()). */
	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
2473 /*-----------------------------------------------------------*/\r
2474 \r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
		/* A queue set is implemented as an ordinary queue whose items are the
		handles of the member queues/semaphores that contain data.  NULL is
		returned if the underlying queue could not be created. */
		return xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
	}

#endif /* configUSE_QUEUE_SETS */
2487 /*-----------------------------------------------------------*/\r
2488 \r
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn = pdFAIL;
	Queue_t * const pxMember = ( Queue_t * ) xQueueOrSemaphore;

		taskENTER_CRITICAL();
		{
			/* The queue/semaphore can only be added if it is not already a
			member of a set (a queue/semaphore can belong to at most one set)
			and it currently holds no items. */
			if( ( pxMember->pxQueueSetContainer == NULL ) && ( pxMember->uxMessagesWaiting == ( UBaseType_t ) 0 ) )
			{
				pxMember->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2520 /*-----------------------------------------------------------*/\r
2521 \r
#if ( configUSE_QUEUE_SETS == 1 )

	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xResult = pdFAIL;
	Queue_t * const pxMember = ( Queue_t * ) xQueueOrSemaphore;

		if( pxMember->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the given set - nothing to do. */
		}
		else if( pxMember->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* Detach the queue from the set. */
				pxMember->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();

			xResult = pdPASS;
		}

		return xResult;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
2556 /*-----------------------------------------------------------*/\r
2557 \r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xMemberHandle = NULL;

		/* Receive from the set's own queue, blocking for up to xTicksToWait.
		The item received (if any) is the handle of a member queue/semaphore
		that contains data; NULL is returned on timeout. */
		( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xMemberHandle, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */

		return xMemberHandle;
	}

#endif /* configUSE_QUEUE_SETS */
2569 /*-----------------------------------------------------------*/\r
2570 \r
#if ( configUSE_QUEUE_SETS == 1 )

	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xMemberHandle = NULL;

		/* ISR-safe, non-blocking receive from the set's own queue.  Returns
		the handle of a member that contains data, or NULL if the set is
		empty.  No higher-priority-task-woken flag is required, hence NULL. */
		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xMemberHandle, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */

		return xMemberHandle;
	}

#endif /* configUSE_QUEUE_SETS */
2582 /*-----------------------------------------------------------*/\r
2583 \r
#if ( configUSE_QUEUE_SETS == 1 )

	/* Called when an item is sent to a queue that is a member of a queue
	set.  Posts the handle of the member queue onto the set's own queue so a
	task blocked on the set can discover which member contains data.  Returns
	pdTRUE if a higher priority task was unblocked and a context switch may
	be required, otherwise pdFALSE. */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called from a critical section. */

		configASSERT( pxQueueSetContainer );
		/* NOTE(review): this assert assumes the set's queue always has space
		for the notification - confirm that member queues cannot out-post the
		set's length (e.g. when xCopyPosition is queueOVERWRITE). */
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* The set's queue is locked - record the post by bumping the
				lock count so the wake can be performed when the queue is
				unlocked. */
				( pxQueueSetContainer->xTxLock )++;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2636 \r
2637 \r
2638 \r
2639 \r
2640 \r
2641 \r
2642 \r
2643 \r
2644 \r
2645 \r
2646 \r
2647 \r