]> begriffs open source - freertos/blob - queue.c
Improvement to O.F. protections (#75)
[freertos] / queue.c
1 /*\r
2  * FreeRTOS Kernel V10.3.1\r
3  * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.\r
4  *\r
5  * Permission is hereby granted, free of charge, to any person obtaining a copy of\r
6  * this software and associated documentation files (the "Software"), to deal in\r
7  * the Software without restriction, including without limitation the rights to\r
8  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\r
9  * the Software, and to permit persons to whom the Software is furnished to do so,\r
10  * subject to the following conditions:\r
11  *\r
12  * The above copyright notice and this permission notice shall be included in all\r
13  * copies or substantial portions of the Software.\r
14  *\r
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\r
17  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\r
18  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\r
19  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
20  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
21  *\r
22  * http://www.FreeRTOS.org\r
23  * http://aws.amazon.com/freertos\r
24  *\r
25  * 1 tab == 4 spaces!\r
26  */\r
27 \r
28 #include <stdlib.h>\r
29 #include <string.h>\r
30 \r
31 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining\r
32 all the API functions to use the MPU wrappers.  That should only be done when\r
33 task.h is included from an application file. */\r
34 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE\r
35 \r
36 #include "FreeRTOS.h"\r
37 #include "task.h"\r
38 #include "queue.h"\r
39 \r
40 #if ( configUSE_CO_ROUTINES == 1 )\r
41         #include "croutine.h"\r
42 #endif\r
43 \r
44 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified\r
45 because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined\r
46 for the header files above, but not in this file, in order to generate the\r
47 correct privileged Vs unprivileged linkage and placement. */\r
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */\r
49 \r
50 \r
51 /* Constants used with the cRxLock and cTxLock structure members. */\r
52 #define queueUNLOCKED                                   ( ( int8_t ) -1 )\r
53 #define queueLOCKED_UNMODIFIED                  ( ( int8_t ) 0 )\r
54 #define queueINT8_MAX                                   ( ( int8_t ) 127 )\r
55 \r
56 /* When the Queue_t structure is used to represent a base queue its pcHead and\r
57 pcTail members are used as pointers into the queue storage area.  When the\r
58 Queue_t structure is used to represent a mutex pcHead and pcTail pointers are\r
59 not necessary, and the pcHead pointer is set to NULL to indicate that the\r
60 structure instead holds a pointer to the mutex holder (if any).  Map alternative\r
61 names to the pcHead and structure member to ensure the readability of the code\r
62 is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form\r
63 a union as their usage is mutually exclusive dependent on what the queue is\r
64 being used for. */\r
65 #define uxQueueType                                             pcHead\r
66 #define queueQUEUE_IS_MUTEX                             NULL\r
67 \r
68 typedef struct QueuePointers\r
69 {\r
70         int8_t *pcTail;                                 /*< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */\r
71         int8_t *pcReadFrom;                             /*< Points to the last place that a queued item was read from when the structure is used as a queue. */\r
72 } QueuePointers_t;\r
73 \r
74 typedef struct SemaphoreData\r
75 {\r
76         TaskHandle_t xMutexHolder;               /*< The handle of the task that holds the mutex. */\r
77         UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */\r
78 } SemaphoreData_t;\r
79 \r
80 /* Semaphores do not actually store or copy data, so have an item size of\r
81 zero. */\r
82 #define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )\r
83 #define queueMUTEX_GIVE_BLOCK_TIME               ( ( TickType_t ) 0U )\r
84 \r
85 #if( configUSE_PREEMPTION == 0 )\r
86         /* If the cooperative scheduler is being used then a yield should not be\r
87         performed just because a higher priority task has been woken. */\r
88         #define queueYIELD_IF_USING_PREEMPTION()\r
89 #else\r
90         #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()\r
91 #endif\r
92 \r
93 /*\r
94  * Definition of the queue used by the scheduler.\r
95  * Items are queued by copy, not reference.  See the following link for the\r
96  * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html\r
97  */\r
98 typedef struct QueueDefinition          /* The old naming convention is used to prevent breaking kernel aware debuggers. */\r
99 {\r
100         int8_t *pcHead;                                 /*< Points to the beginning of the queue storage area. */\r
101         int8_t *pcWriteTo;                              /*< Points to the free next place in the storage area. */\r
102 \r
103         union\r
104         {\r
105                 QueuePointers_t xQueue;         /*< Data required exclusively when this structure is used as a queue. */\r
106                 SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */\r
107         } u;\r
108 \r
109         List_t xTasksWaitingToSend;             /*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */\r
110         List_t xTasksWaitingToReceive;  /*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */\r
111 \r
112         volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */\r
113         UBaseType_t uxLength;                   /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */\r
114         UBaseType_t uxItemSize;                 /*< The size of each items that the queue will hold. */\r
115 \r
116         volatile int8_t cRxLock;                /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */\r
117         volatile int8_t cTxLock;                /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */\r
118 \r
119         #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
120                 uint8_t ucStaticallyAllocated;  /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */\r
121         #endif\r
122 \r
123         #if ( configUSE_QUEUE_SETS == 1 )\r
124                 struct QueueDefinition *pxQueueSetContainer;\r
125         #endif\r
126 \r
127         #if ( configUSE_TRACE_FACILITY == 1 )\r
128                 UBaseType_t uxQueueNumber;\r
129                 uint8_t ucQueueType;\r
130         #endif\r
131 \r
132 } xQUEUE;\r
133 \r
134 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t\r
135 name below to enable the use of older kernel aware debuggers. */\r
136 typedef xQUEUE Queue_t;\r
137 \r
138 /*-----------------------------------------------------------*/\r
139 \r
140 /*\r
141  * The queue registry is just a means for kernel aware debuggers to locate\r
142  * queue structures.  It has no other purpose so is an optional component.\r
143  */\r
144 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
145 \r
146         /* The type stored within the queue registry array.  This allows a name\r
147         to be assigned to each queue making kernel aware debugging a little\r
148         more user friendly. */\r
149         typedef struct QUEUE_REGISTRY_ITEM\r
150         {\r
151                 const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
152                 QueueHandle_t xHandle;\r
153         } xQueueRegistryItem;\r
154 \r
155         /* The old xQueueRegistryItem name is maintained above then typedefed to the\r
156         new xQueueRegistryItem name below to enable the use of older kernel aware\r
157         debuggers. */\r
158         typedef xQueueRegistryItem QueueRegistryItem_t;\r
159 \r
160         /* The queue registry is simply an array of QueueRegistryItem_t structures.\r
161         The pcQueueName member of a structure being NULL is indicative of the\r
162         array position being vacant. */\r
163         PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];\r
164 \r
165 #endif /* configQUEUE_REGISTRY_SIZE */\r
166 \r
167 /*\r
168  * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not\r
169  * prevent an ISR from adding or removing items to the queue, but does prevent\r
170  * an ISR from removing tasks from the queue event lists.  If an ISR finds a\r
171  * queue is locked it will instead increment the appropriate queue lock count\r
172  * to indicate that a task may require unblocking.  When the queue in unlocked\r
173  * these lock counts are inspected, and the appropriate action taken.\r
174  */\r
175 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;\r
176 \r
177 /*\r
178  * Uses a critical section to determine if there is any data in a queue.\r
179  *\r
180  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.\r
181  */\r
182 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;\r
183 \r
184 /*\r
185  * Uses a critical section to determine if there is any space in a queue.\r
186  *\r
187  * @return pdTRUE if there is no space, otherwise pdFALSE;\r
188  */\r
189 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;\r
190 \r
191 /*\r
192  * Copies an item into the queue, either at the front of the queue or the\r
193  * back of the queue.\r
194  */\r
195 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;\r
196 \r
197 /*\r
198  * Copies an item out of a queue.\r
199  */\r
200 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;\r
201 \r
202 #if ( configUSE_QUEUE_SETS == 1 )\r
203         /*\r
204          * Checks to see if a queue is a member of a queue set, and if so, notifies\r
205          * the queue set that the queue contains data.\r
206          */\r
207         static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;\r
208 #endif\r
209 \r
210 /*\r
211  * Called after a Queue_t structure has been allocated either statically or\r
212  * dynamically to fill in the structure's members.\r
213  */\r
214 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;\r
215 \r
216 /*\r
217  * Mutexes are a special type of queue.  When a mutex is created, first the\r
218  * queue is created, then prvInitialiseMutex() is called to configure the queue\r
219  * as a mutex.\r
220  */\r
221 #if( configUSE_MUTEXES == 1 )\r
222         static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;\r
223 #endif\r
224 \r
225 #if( configUSE_MUTEXES == 1 )\r
226         /*\r
227          * If a task waiting for a mutex causes the mutex holder to inherit a\r
228          * priority, but the waiting task times out, then the holder should\r
229          * disinherit the priority - but only down to the highest priority of any\r
230          * other tasks that are waiting for the same mutex.  This function returns\r
231          * that priority.\r
232          */\r
233         static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;\r
234 #endif\r
235 /*-----------------------------------------------------------*/\r
236 \r
237 /*\r
238  * Macro to mark a queue as locked.  Locking a queue prevents an ISR from\r
239  * accessing the queue event lists.\r
240  */\r
241 #define prvLockQueue( pxQueue )                                                         \\r
242         taskENTER_CRITICAL();                                                                   \\r
243         {                                                                                                               \\r
244                 if( ( pxQueue )->cRxLock == queueUNLOCKED )                     \\r
245                 {                                                                                                       \\r
246                         ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;  \\r
247                 }                                                                                                       \\r
248                 if( ( pxQueue )->cTxLock == queueUNLOCKED )                     \\r
249                 {                                                                                                       \\r
250                         ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;  \\r
251                 }                                                                                                       \\r
252         }                                                                                                               \\r
253         taskEXIT_CRITICAL()\r
254 /*-----------------------------------------------------------*/\r
255 \r
256 BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )\r
257 {\r
258 Queue_t * const pxQueue = xQueue;\r
259 \r
260         configASSERT( pxQueue );\r
261 \r
262         taskENTER_CRITICAL();\r
263         {\r
264                 pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */\r
265                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;\r
266                 pxQueue->pcWriteTo = pxQueue->pcHead;\r
267                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */\r
268                 pxQueue->cRxLock = queueUNLOCKED;\r
269                 pxQueue->cTxLock = queueUNLOCKED;\r
270 \r
271                 if( xNewQueue == pdFALSE )\r
272                 {\r
273                         /* If there are tasks blocked waiting to read from the queue, then\r
274                         the tasks will remain blocked as after this function exits the queue\r
275                         will still be empty.  If there are tasks blocked waiting to write to\r
276                         the queue, then one should be unblocked as after this function exits\r
277                         it will be possible to write to it. */\r
278                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
279                         {\r
280                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
281                                 {\r
282                                         queueYIELD_IF_USING_PREEMPTION();\r
283                                 }\r
284                                 else\r
285                                 {\r
286                                         mtCOVERAGE_TEST_MARKER();\r
287                                 }\r
288                         }\r
289                         else\r
290                         {\r
291                                 mtCOVERAGE_TEST_MARKER();\r
292                         }\r
293                 }\r
294                 else\r
295                 {\r
296                         /* Ensure the event queues start in the correct state. */\r
297                         vListInitialise( &( pxQueue->xTasksWaitingToSend ) );\r
298                         vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );\r
299                 }\r
300         }\r
301         taskEXIT_CRITICAL();\r
302 \r
303         /* A value is returned for calling semantic consistency with previous\r
304         versions. */\r
305         return pdPASS;\r
306 }\r
307 /*-----------------------------------------------------------*/\r
308 \r
309 #if( configSUPPORT_STATIC_ALLOCATION == 1 )\r
310 \r
311         QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )\r
312         {\r
313         Queue_t *pxNewQueue;\r
314 \r
315                 configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
316 \r
317                 /* The StaticQueue_t structure and the queue storage area must be\r
318                 supplied. */\r
319                 configASSERT( pxStaticQueue != NULL );\r
320 \r
321                 /* A queue storage area should be provided if the item size is not 0, and\r
322                 should not be provided if the item size is 0. */\r
323                 configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );\r
324                 configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );\r
325 \r
326                 #if( configASSERT_DEFINED == 1 )\r
327                 {\r
328                         /* Sanity check that the size of the structure used to declare a\r
329                         variable of type StaticQueue_t or StaticSemaphore_t equals the size of\r
330                         the real queue and semaphore structures. */\r
331                         volatile size_t xSize = sizeof( StaticQueue_t );\r
332                         configASSERT( xSize == sizeof( Queue_t ) );\r
333                         ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */\r
334                 }\r
335                 #endif /* configASSERT_DEFINED */\r
336 \r
337                 /* The address of a statically allocated queue was passed in, use it.\r
338                 The address of a statically allocated storage area was also passed in\r
339                 but is already set. */\r
340                 pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */\r
341 \r
342                 if( pxNewQueue != NULL )\r
343                 {\r
344                         #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )\r
345                         {\r
346                                 /* Queues can be allocated wither statically or dynamically, so\r
347                                 note this queue was allocated statically in case the queue is\r
348                                 later deleted. */\r
349                                 pxNewQueue->ucStaticallyAllocated = pdTRUE;\r
350                         }\r
351                         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
352 \r
353                         prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );\r
354                 }\r
355                 else\r
356                 {\r
357                         traceQUEUE_CREATE_FAILED( ucQueueType );\r
358                         mtCOVERAGE_TEST_MARKER();\r
359                 }\r
360 \r
361                 return pxNewQueue;\r
362         }\r
363 \r
364 #endif /* configSUPPORT_STATIC_ALLOCATION */\r
365 /*-----------------------------------------------------------*/\r
366 \r
367 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )\r
368 \r
369         QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )\r
370         {\r
371         Queue_t *pxNewQueue;\r
372         size_t xQueueSizeInBytes;\r
373         uint8_t *pucQueueStorage;\r
374 \r
375                 configASSERT( uxQueueLength > ( UBaseType_t ) 0 );\r
376 \r
377                 /* Allocate enough space to hold the maximum number of items that\r
378                 can be in the queue at any time.  It is valid for uxItemSize to be\r
379                 zero in the case the queue is used as a semaphore. */\r
380                 xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */\r
381 \r
382                 /* Check for multiplication overflow. */\r
383                 configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );\r
384 \r
385                 /* Allocate the queue and storage area.  Justification for MISRA\r
386                 deviation as follows:  pvPortMalloc() always ensures returned memory\r
387                 blocks are aligned per the requirements of the MCU stack.  In this case\r
388                 pvPortMalloc() must return a pointer that is guaranteed to meet the\r
389                 alignment requirements of the Queue_t structure - which in this case\r
390                 is an int8_t *.  Therefore, whenever the stack alignment requirements\r
391                 are greater than or equal to the pointer to char requirements the cast\r
392                 is safe.  In other cases alignment requirements are not strict (one or\r
393                 two bytes). */\r
394                 pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */\r
395 \r
396                 if( pxNewQueue != NULL )\r
397                 {\r
398                         /* Jump past the queue structure to find the location of the queue\r
399                         storage area. */\r
400                         pucQueueStorage = ( uint8_t * ) pxNewQueue;\r
401                         pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */\r
402 \r
403                         #if( configSUPPORT_STATIC_ALLOCATION == 1 )\r
404                         {\r
405                                 /* Queues can be created either statically or dynamically, so\r
406                                 note this task was created dynamically in case it is later\r
407                                 deleted. */\r
408                                 pxNewQueue->ucStaticallyAllocated = pdFALSE;\r
409                         }\r
410                         #endif /* configSUPPORT_STATIC_ALLOCATION */\r
411 \r
412                         prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );\r
413                 }\r
414                 else\r
415                 {\r
416                         traceQUEUE_CREATE_FAILED( ucQueueType );\r
417                         mtCOVERAGE_TEST_MARKER();\r
418                 }\r
419 \r
420                 return pxNewQueue;\r
421         }\r
422 \r
423 #endif /* configSUPPORT_STATIC_ALLOCATION */\r
424 /*-----------------------------------------------------------*/\r
425 \r
426 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )\r
427 {\r
428         /* Remove compiler warnings about unused parameters should\r
429         configUSE_TRACE_FACILITY not be set to 1. */\r
430         ( void ) ucQueueType;\r
431 \r
432         if( uxItemSize == ( UBaseType_t ) 0 )\r
433         {\r
434                 /* No RAM was allocated for the queue storage area, but PC head cannot\r
435                 be set to NULL because NULL is used as a key to say the queue is used as\r
436                 a mutex.  Therefore just set pcHead to point to the queue as a benign\r
437                 value that is known to be within the memory map. */\r
438                 pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;\r
439         }\r
440         else\r
441         {\r
442                 /* Set the head to the start of the queue storage area. */\r
443                 pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;\r
444         }\r
445 \r
446         /* Initialise the queue members as described where the queue type is\r
447         defined. */\r
448         pxNewQueue->uxLength = uxQueueLength;\r
449         pxNewQueue->uxItemSize = uxItemSize;\r
450         ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );\r
451 \r
452         #if ( configUSE_TRACE_FACILITY == 1 )\r
453         {\r
454                 pxNewQueue->ucQueueType = ucQueueType;\r
455         }\r
456         #endif /* configUSE_TRACE_FACILITY */\r
457 \r
458         #if( configUSE_QUEUE_SETS == 1 )\r
459         {\r
460                 pxNewQueue->pxQueueSetContainer = NULL;\r
461         }\r
462         #endif /* configUSE_QUEUE_SETS */\r
463 \r
464         traceQUEUE_CREATE( pxNewQueue );\r
465 }\r
466 /*-----------------------------------------------------------*/\r
467 \r
468 #if( configUSE_MUTEXES == 1 )\r
469 \r
470         static void prvInitialiseMutex( Queue_t *pxNewQueue )\r
471         {\r
472                 if( pxNewQueue != NULL )\r
473                 {\r
474                         /* The queue create function will set all the queue structure members\r
475                         correctly for a generic queue, but this function is creating a\r
476                         mutex.  Overwrite those members that need to be set differently -\r
477                         in particular the information required for priority inheritance. */\r
478                         pxNewQueue->u.xSemaphore.xMutexHolder = NULL;\r
479                         pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;\r
480 \r
481                         /* In case this is a recursive mutex. */\r
482                         pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;\r
483 \r
484                         traceCREATE_MUTEX( pxNewQueue );\r
485 \r
486                         /* Start with the semaphore in the expected state. */\r
487                         ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );\r
488                 }\r
489                 else\r
490                 {\r
491                         traceCREATE_MUTEX_FAILED();\r
492                 }\r
493         }\r
494 \r
495 #endif /* configUSE_MUTEXES */\r
496 /*-----------------------------------------------------------*/\r
497 \r
498 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
499 \r
500         QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )\r
501         {\r
502         QueueHandle_t xNewQueue;\r
503         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;\r
504 \r
505                 xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );\r
506                 prvInitialiseMutex( ( Queue_t * ) xNewQueue );\r
507 \r
508                 return xNewQueue;\r
509         }\r
510 \r
511 #endif /* configUSE_MUTEXES */\r
512 /*-----------------------------------------------------------*/\r
513 \r
514 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )\r
515 \r
516         QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )\r
517         {\r
518         QueueHandle_t xNewQueue;\r
519         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;\r
520 \r
521                 /* Prevent compiler warnings about unused parameters if\r
522                 configUSE_TRACE_FACILITY does not equal 1. */\r
523                 ( void ) ucQueueType;\r
524 \r
525                 xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );\r
526                 prvInitialiseMutex( ( Queue_t * ) xNewQueue );\r
527 \r
528                 return xNewQueue;\r
529         }\r
530 \r
531 #endif /* configUSE_MUTEXES */\r
532 /*-----------------------------------------------------------*/\r
533 \r
534 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )\r
535 \r
536         TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )\r
537         {\r
538         TaskHandle_t pxReturn;\r
539         Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;\r
540 \r
541                 /* This function is called by xSemaphoreGetMutexHolder(), and should not\r
542                 be called directly.  Note:  This is a good way of determining if the\r
543                 calling task is the mutex holder, but not a good way of determining the\r
544                 identity of the mutex holder, as the holder may change between the\r
545                 following critical section exiting and the function returning. */\r
546                 taskENTER_CRITICAL();\r
547                 {\r
548                         if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )\r
549                         {\r
550                                 pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;\r
551                         }\r
552                         else\r
553                         {\r
554                                 pxReturn = NULL;\r
555                         }\r
556                 }\r
557                 taskEXIT_CRITICAL();\r
558 \r
559                 return pxReturn;\r
560         } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */\r
561 \r
562 #endif\r
563 /*-----------------------------------------------------------*/\r
564 \r
565 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )\r
566 \r
567         TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )\r
568         {\r
569         TaskHandle_t pxReturn;\r
570 \r
571                 configASSERT( xSemaphore );\r
572 \r
573                 /* Mutexes cannot be used in interrupt service routines, so the mutex\r
574                 holder should not change in an ISR, and therefore a critical section is\r
575                 not required here. */\r
576                 if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )\r
577                 {\r
578                         pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;\r
579                 }\r
580                 else\r
581                 {\r
582                         pxReturn = NULL;\r
583                 }\r
584 \r
585                 return pxReturn;\r
586         } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */\r
587 \r
588 #endif\r
589 /*-----------------------------------------------------------*/\r
590 \r
591 #if ( configUSE_RECURSIVE_MUTEXES == 1 )\r
592 \r
593         BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )\r
594         {\r
595         BaseType_t xReturn;\r
596         Queue_t * const pxMutex = ( Queue_t * ) xMutex;\r
597 \r
598                 configASSERT( pxMutex );\r
599 \r
600                 /* If this is the task that holds the mutex then xMutexHolder will not\r
601                 change outside of this task.  If this task does not hold the mutex then\r
602                 pxMutexHolder can never coincidentally equal the tasks handle, and as\r
603                 this is the only condition we are interested in it does not matter if\r
604                 pxMutexHolder is accessed simultaneously by another task.  Therefore no\r
605                 mutual exclusion is required to test the pxMutexHolder variable. */\r
606                 if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )\r
607                 {\r
608                         traceGIVE_MUTEX_RECURSIVE( pxMutex );\r
609 \r
610                         /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to\r
611                         the task handle, therefore no underflow check is required.  Also,\r
612                         uxRecursiveCallCount is only modified by the mutex holder, and as\r
613                         there can only be one, no mutual exclusion is required to modify the\r
614                         uxRecursiveCallCount member. */\r
615                         ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;\r
616 \r
617                         /* Has the recursive call count unwound to 0? */\r
618                         if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )\r
619                         {\r
620                                 /* Return the mutex.  This will automatically unblock any other\r
621                                 task that might be waiting to access the mutex. */\r
622                                 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );\r
623                         }\r
624                         else\r
625                         {\r
626                                 mtCOVERAGE_TEST_MARKER();\r
627                         }\r
628 \r
629                         xReturn = pdPASS;\r
630                 }\r
631                 else\r
632                 {\r
633                         /* The mutex cannot be given because the calling task is not the\r
634                         holder. */\r
635                         xReturn = pdFAIL;\r
636 \r
637                         traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );\r
638                 }\r
639 \r
640                 return xReturn;\r
641         }\r
642 \r
643 #endif /* configUSE_RECURSIVE_MUTEXES */\r
644 /*-----------------------------------------------------------*/\r
645 \r
646 #if ( configUSE_RECURSIVE_MUTEXES == 1 )\r
647 \r
648         BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )\r
649         {\r
650         BaseType_t xReturn;\r
651         Queue_t * const pxMutex = ( Queue_t * ) xMutex;\r
652 \r
653                 configASSERT( pxMutex );\r
654 \r
655                 /* Comments regarding mutual exclusion as per those within\r
656                 xQueueGiveMutexRecursive(). */\r
657 \r
658                 traceTAKE_MUTEX_RECURSIVE( pxMutex );\r
659 \r
660                 if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )\r
661                 {\r
662                         ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;\r
663                         xReturn = pdPASS;\r
664                 }\r
665                 else\r
666                 {\r
667                         xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );\r
668 \r
669                         /* pdPASS will only be returned if the mutex was successfully\r
670                         obtained.  The calling task may have entered the Blocked state\r
671                         before reaching here. */\r
672                         if( xReturn != pdFAIL )\r
673                         {\r
674                                 ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;\r
675                         }\r
676                         else\r
677                         {\r
678                                 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );\r
679                         }\r
680                 }\r
681 \r
682                 return xReturn;\r
683         }\r
684 \r
685 #endif /* configUSE_RECURSIVE_MUTEXES */\r
686 /*-----------------------------------------------------------*/\r
687 \r
688 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )\r
689 \r
690         QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )\r
691         {\r
692         QueueHandle_t xHandle;\r
693 \r
694                 configASSERT( uxMaxCount != 0 );\r
695                 configASSERT( uxInitialCount <= uxMaxCount );\r
696 \r
697                 xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );\r
698 \r
699                 if( xHandle != NULL )\r
700                 {\r
701                         ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;\r
702 \r
703                         traceCREATE_COUNTING_SEMAPHORE();\r
704                 }\r
705                 else\r
706                 {\r
707                         traceCREATE_COUNTING_SEMAPHORE_FAILED();\r
708                 }\r
709 \r
710                 return xHandle;\r
711         }\r
712 \r
713 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */\r
714 /*-----------------------------------------------------------*/\r
715 \r
716 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
717 \r
718         QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )\r
719         {\r
720         QueueHandle_t xHandle;\r
721 \r
722                 configASSERT( uxMaxCount != 0 );\r
723                 configASSERT( uxInitialCount <= uxMaxCount );\r
724 \r
725                 xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );\r
726 \r
727                 if( xHandle != NULL )\r
728                 {\r
729                         ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;\r
730 \r
731                         traceCREATE_COUNTING_SEMAPHORE();\r
732                 }\r
733                 else\r
734                 {\r
735                         traceCREATE_COUNTING_SEMAPHORE_FAILED();\r
736                 }\r
737 \r
738                 return xHandle;\r
739         }\r
740 \r
741 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */\r
742 /*-----------------------------------------------------------*/\r
743 \r
744 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )\r
745 {\r
746 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;\r
747 TimeOut_t xTimeOut;\r
748 Queue_t * const pxQueue = xQueue;\r
749 \r
750         configASSERT( pxQueue );\r
751         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
752         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
753         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
754         {\r
755                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
756         }\r
757         #endif\r
758 \r
759 \r
760         /*lint -save -e904 This function relaxes the coding standard somewhat to\r
761         allow return statements within the function itself.  This is done in the\r
762         interest of execution time efficiency. */\r
763         for( ;; )\r
764         {\r
765                 taskENTER_CRITICAL();\r
766                 {\r
767                         /* Is there room on the queue now?  The running task must be the\r
768                         highest priority task wanting to access the queue.  If the head item\r
769                         in the queue is to be overwritten then it does not matter if the\r
770                         queue is full. */\r
771                         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
772                         {\r
773                                 traceQUEUE_SEND( pxQueue );\r
774 \r
775                                 #if ( configUSE_QUEUE_SETS == 1 )\r
776                                 {\r
777                                 const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;\r
778 \r
779                                         xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
780 \r
781                                         if( pxQueue->pxQueueSetContainer != NULL )\r
782                                         {\r
783                                                 if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )\r
784                                                 {\r
785                                                         /* Do not notify the queue set as an existing item\r
786                                                         was overwritten in the queue so the number of items\r
787                                                         in the queue has not changed. */\r
788                                                         mtCOVERAGE_TEST_MARKER();\r
789                                                 }\r
790                                                 else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )\r
791                                                 {\r
792                                                         /* The queue is a member of a queue set, and posting\r
793                                                         to the queue set caused a higher priority task to\r
794                                                         unblock. A context switch is required. */\r
795                                                         queueYIELD_IF_USING_PREEMPTION();\r
796                                                 }\r
797                                                 else\r
798                                                 {\r
799                                                         mtCOVERAGE_TEST_MARKER();\r
800                                                 }\r
801                                         }\r
802                                         else\r
803                                         {\r
804                                                 /* If there was a task waiting for data to arrive on the\r
805                                                 queue then unblock it now. */\r
806                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
807                                                 {\r
808                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
809                                                         {\r
810                                                                 /* The unblocked task has a priority higher than\r
811                                                                 our own so yield immediately.  Yes it is ok to\r
812                                                                 do this from within the critical section - the\r
813                                                                 kernel takes care of that. */\r
814                                                                 queueYIELD_IF_USING_PREEMPTION();\r
815                                                         }\r
816                                                         else\r
817                                                         {\r
818                                                                 mtCOVERAGE_TEST_MARKER();\r
819                                                         }\r
820                                                 }\r
821                                                 else if( xYieldRequired != pdFALSE )\r
822                                                 {\r
823                                                         /* This path is a special case that will only get\r
824                                                         executed if the task was holding multiple mutexes\r
825                                                         and the mutexes were given back in an order that is\r
826                                                         different to that in which they were taken. */\r
827                                                         queueYIELD_IF_USING_PREEMPTION();\r
828                                                 }\r
829                                                 else\r
830                                                 {\r
831                                                         mtCOVERAGE_TEST_MARKER();\r
832                                                 }\r
833                                         }\r
834                                 }\r
835                                 #else /* configUSE_QUEUE_SETS */\r
836                                 {\r
837                                         xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
838 \r
839                                         /* If there was a task waiting for data to arrive on the\r
840                                         queue then unblock it now. */\r
841                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
842                                         {\r
843                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
844                                                 {\r
845                                                         /* The unblocked task has a priority higher than\r
846                                                         our own so yield immediately.  Yes it is ok to do\r
847                                                         this from within the critical section - the kernel\r
848                                                         takes care of that. */\r
849                                                         queueYIELD_IF_USING_PREEMPTION();\r
850                                                 }\r
851                                                 else\r
852                                                 {\r
853                                                         mtCOVERAGE_TEST_MARKER();\r
854                                                 }\r
855                                         }\r
856                                         else if( xYieldRequired != pdFALSE )\r
857                                         {\r
858                                                 /* This path is a special case that will only get\r
859                                                 executed if the task was holding multiple mutexes and\r
860                                                 the mutexes were given back in an order that is\r
861                                                 different to that in which they were taken. */\r
862                                                 queueYIELD_IF_USING_PREEMPTION();\r
863                                         }\r
864                                         else\r
865                                         {\r
866                                                 mtCOVERAGE_TEST_MARKER();\r
867                                         }\r
868                                 }\r
869                                 #endif /* configUSE_QUEUE_SETS */\r
870 \r
871                                 taskEXIT_CRITICAL();\r
872                                 return pdPASS;\r
873                         }\r
874                         else\r
875                         {\r
876                                 if( xTicksToWait == ( TickType_t ) 0 )\r
877                                 {\r
878                                         /* The queue was full and no block time is specified (or\r
879                                         the block time has expired) so leave now. */\r
880                                         taskEXIT_CRITICAL();\r
881 \r
882                                         /* Return to the original privilege level before exiting\r
883                                         the function. */\r
884                                         traceQUEUE_SEND_FAILED( pxQueue );\r
885                                         return errQUEUE_FULL;\r
886                                 }\r
887                                 else if( xEntryTimeSet == pdFALSE )\r
888                                 {\r
889                                         /* The queue was full and a block time was specified so\r
890                                         configure the timeout structure. */\r
891                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
892                                         xEntryTimeSet = pdTRUE;\r
893                                 }\r
894                                 else\r
895                                 {\r
896                                         /* Entry time was already set. */\r
897                                         mtCOVERAGE_TEST_MARKER();\r
898                                 }\r
899                         }\r
900                 }\r
901                 taskEXIT_CRITICAL();\r
902 \r
903                 /* Interrupts and other tasks can send to and receive from the queue\r
904                 now the critical section has been exited. */\r
905 \r
906                 vTaskSuspendAll();\r
907                 prvLockQueue( pxQueue );\r
908 \r
909                 /* Update the timeout state to see if it has expired yet. */\r
910                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
911                 {\r
912                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
913                         {\r
914                                 traceBLOCKING_ON_QUEUE_SEND( pxQueue );\r
915                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );\r
916 \r
917                                 /* Unlocking the queue means queue events can effect the\r
918                                 event list.  It is possible that interrupts occurring now\r
919                                 remove this task from the event list again - but as the\r
920                                 scheduler is suspended the task will go onto the pending\r
921                                 ready last instead of the actual ready list. */\r
922                                 prvUnlockQueue( pxQueue );\r
923 \r
924                                 /* Resuming the scheduler will move tasks from the pending\r
925                                 ready list into the ready list - so it is feasible that this\r
926                                 task is already in a ready list before it yields - in which\r
927                                 case the yield will not cause a context switch unless there\r
928                                 is also a higher priority task in the pending ready list. */\r
929                                 if( xTaskResumeAll() == pdFALSE )\r
930                                 {\r
931                                         portYIELD_WITHIN_API();\r
932                                 }\r
933                         }\r
934                         else\r
935                         {\r
936                                 /* Try again. */\r
937                                 prvUnlockQueue( pxQueue );\r
938                                 ( void ) xTaskResumeAll();\r
939                         }\r
940                 }\r
941                 else\r
942                 {\r
943                         /* The timeout has expired. */\r
944                         prvUnlockQueue( pxQueue );\r
945                         ( void ) xTaskResumeAll();\r
946 \r
947                         traceQUEUE_SEND_FAILED( pxQueue );\r
948                         return errQUEUE_FULL;\r
949                 }\r
950         } /*lint -restore */\r
951 }\r
952 /*-----------------------------------------------------------*/\r
953 \r
954 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )\r
955 {\r
956 BaseType_t xReturn;\r
957 UBaseType_t uxSavedInterruptStatus;\r
958 Queue_t * const pxQueue = xQueue;\r
959 \r
960         configASSERT( pxQueue );\r
961         configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
962         configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );\r
963 \r
964         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
965         system call (or maximum API call) interrupt priority.  Interrupts that are\r
966         above the maximum system call priority are kept permanently enabled, even\r
967         when the RTOS kernel is in a critical section, but cannot make any calls to\r
968         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
969         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
970         failure if a FreeRTOS API function is called from an interrupt that has been\r
971         assigned a priority above the configured maximum system call priority.\r
972         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
973         that have been assigned a priority at or (logically) below the maximum\r
974         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
975         safe API to ensure interrupt entry is as fast and as simple as possible.\r
976         More information (albeit Cortex-M specific) is provided on the following\r
977         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
978         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
979 \r
980         /* Similar to xQueueGenericSend, except without blocking if there is no room\r
981         in the queue.  Also don't directly wake a task that was blocked on a queue\r
982         read, instead return a flag to say whether a context switch is required or\r
983         not (i.e. has a task with a higher priority than us been woken by this\r
984         post). */\r
985         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
986         {\r
987                 if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )\r
988                 {\r
989                         const int8_t cTxLock = pxQueue->cTxLock;\r
990                         const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;\r
991 \r
992                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
993 \r
994                         /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a\r
995                         semaphore or mutex.  That means prvCopyDataToQueue() cannot result\r
996                         in a task disinheriting a priority and prvCopyDataToQueue() can be\r
997                         called here even though the disinherit function does not check if\r
998                         the scheduler is suspended before accessing the ready lists. */\r
999                         ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );\r
1000 \r
1001                         /* The event list is not altered if the queue is locked.  This will\r
1002                         be done when the queue is unlocked later. */\r
1003                         if( cTxLock == queueUNLOCKED )\r
1004                         {\r
1005                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1006                                 {\r
1007                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1008                                         {\r
1009                                                 if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )\r
1010                                                 {\r
1011                                                         /* Do not notify the queue set as an existing item\r
1012                                                         was overwritten in the queue so the number of items\r
1013                                                         in the queue has not changed. */\r
1014                                                         mtCOVERAGE_TEST_MARKER();\r
1015                                                 }\r
1016                                                 else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )\r
1017                                                 {\r
1018                                                         /* The queue is a member of a queue set, and posting\r
1019                                                         to the queue set caused a higher priority task to\r
1020                                                         unblock.  A context switch is required. */\r
1021                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1022                                                         {\r
1023                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1024                                                         }\r
1025                                                         else\r
1026                                                         {\r
1027                                                                 mtCOVERAGE_TEST_MARKER();\r
1028                                                         }\r
1029                                                 }\r
1030                                                 else\r
1031                                                 {\r
1032                                                         mtCOVERAGE_TEST_MARKER();\r
1033                                                 }\r
1034                                         }\r
1035                                         else\r
1036                                         {\r
1037                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1038                                                 {\r
1039                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1040                                                         {\r
1041                                                                 /* The task waiting has a higher priority so\r
1042                                                                 record that a context switch is required. */\r
1043                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1044                                                                 {\r
1045                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1046                                                                 }\r
1047                                                                 else\r
1048                                                                 {\r
1049                                                                         mtCOVERAGE_TEST_MARKER();\r
1050                                                                 }\r
1051                                                         }\r
1052                                                         else\r
1053                                                         {\r
1054                                                                 mtCOVERAGE_TEST_MARKER();\r
1055                                                         }\r
1056                                                 }\r
1057                                                 else\r
1058                                                 {\r
1059                                                         mtCOVERAGE_TEST_MARKER();\r
1060                                                 }\r
1061                                         }\r
1062                                 }\r
1063                                 #else /* configUSE_QUEUE_SETS */\r
1064                                 {\r
1065                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1066                                         {\r
1067                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1068                                                 {\r
1069                                                         /* The task waiting has a higher priority so record that a\r
1070                                                         context switch is required. */\r
1071                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1072                                                         {\r
1073                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1074                                                         }\r
1075                                                         else\r
1076                                                         {\r
1077                                                                 mtCOVERAGE_TEST_MARKER();\r
1078                                                         }\r
1079                                                 }\r
1080                                                 else\r
1081                                                 {\r
1082                                                         mtCOVERAGE_TEST_MARKER();\r
1083                                                 }\r
1084                                         }\r
1085                                         else\r
1086                                         {\r
1087                                                 mtCOVERAGE_TEST_MARKER();\r
1088                                         }\r
1089 \r
1090                                         /* Not used in this path. */\r
1091                                         ( void ) uxPreviousMessagesWaiting;\r
1092                                 }\r
1093                                 #endif /* configUSE_QUEUE_SETS */\r
1094                         }\r
1095                         else\r
1096                         {\r
1097                                 /* Increment the lock count so the task that unlocks the queue\r
1098                                 knows that data was posted while it was locked. */\r
1099                                 configASSERT( cTxLock != queueINT8_MAX);\r
1100 \r
1101                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1102                         }\r
1103 \r
1104                         xReturn = pdPASS;\r
1105                 }\r
1106                 else\r
1107                 {\r
1108                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1109                         xReturn = errQUEUE_FULL;\r
1110                 }\r
1111         }\r
1112         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1113 \r
1114         return xReturn;\r
1115 }\r
1116 /*-----------------------------------------------------------*/\r
1117 \r
1118 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )\r
1119 {\r
1120 BaseType_t xReturn;\r
1121 UBaseType_t uxSavedInterruptStatus;\r
1122 Queue_t * const pxQueue = xQueue;\r
1123 \r
1124         /* Similar to xQueueGenericSendFromISR() but used with semaphores where the\r
1125         item size is 0.  Don't directly wake a task that was blocked on a queue\r
1126         read, instead return a flag to say whether a context switch is required or\r
1127         not (i.e. has a task with a higher priority than us been woken by this\r
1128         post). */\r
1129 \r
1130         configASSERT( pxQueue );\r
1131 \r
1132         /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()\r
1133         if the item size is not 0. */\r
1134         configASSERT( pxQueue->uxItemSize == 0 );\r
1135 \r
1136         /* Normally a mutex would not be given from an interrupt, especially if\r
1137         there is a mutex holder, as priority inheritance makes no sense for an\r
1138         interrupts, only tasks. */\r
1139         configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );\r
1140 \r
1141         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1142         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1143         above the maximum system call priority are kept permanently enabled, even\r
1144         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1145         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1146         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1147         failure if a FreeRTOS API function is called from an interrupt that has been\r
1148         assigned a priority above the configured maximum system call priority.\r
1149         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1150         that have been assigned a priority at or (logically) below the maximum\r
1151         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1152         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1153         More information (albeit Cortex-M specific) is provided on the following\r
1154         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1155         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1156 \r
1157         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1158         {\r
1159                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1160 \r
1161                 /* When the queue is used to implement a semaphore no data is ever\r
1162                 moved through the queue but it is still valid to see if the queue 'has\r
1163                 space'. */\r
1164                 if( uxMessagesWaiting < pxQueue->uxLength )\r
1165                 {\r
1166                         const int8_t cTxLock = pxQueue->cTxLock;\r
1167 \r
1168                         traceQUEUE_SEND_FROM_ISR( pxQueue );\r
1169 \r
1170                         /* A task can only have an inherited priority if it is a mutex\r
1171                         holder - and if there is a mutex holder then the mutex cannot be\r
1172                         given from an ISR.  As this is the ISR version of the function it\r
1173                         can be assumed there is no mutex holder and no need to determine if\r
1174                         priority disinheritance is needed.  Simply increase the count of\r
1175                         messages (semaphores) available. */\r
1176                         pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;\r
1177 \r
1178                         /* The event list is not altered if the queue is locked.  This will\r
1179                         be done when the queue is unlocked later. */\r
1180                         if( cTxLock == queueUNLOCKED )\r
1181                         {\r
1182                                 #if ( configUSE_QUEUE_SETS == 1 )\r
1183                                 {\r
1184                                         if( pxQueue->pxQueueSetContainer != NULL )\r
1185                                         {\r
1186                                                 if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )\r
1187                                                 {\r
1188                                                         /* The semaphore is a member of a queue set, and\r
1189                                                         posting to the queue set caused a higher priority\r
1190                                                         task to unblock.  A context switch is required. */\r
1191                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1192                                                         {\r
1193                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1194                                                         }\r
1195                                                         else\r
1196                                                         {\r
1197                                                                 mtCOVERAGE_TEST_MARKER();\r
1198                                                         }\r
1199                                                 }\r
1200                                                 else\r
1201                                                 {\r
1202                                                         mtCOVERAGE_TEST_MARKER();\r
1203                                                 }\r
1204                                         }\r
1205                                         else\r
1206                                         {\r
1207                                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1208                                                 {\r
1209                                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1210                                                         {\r
1211                                                                 /* The task waiting has a higher priority so\r
1212                                                                 record that a context switch is required. */\r
1213                                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1214                                                                 {\r
1215                                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1216                                                                 }\r
1217                                                                 else\r
1218                                                                 {\r
1219                                                                         mtCOVERAGE_TEST_MARKER();\r
1220                                                                 }\r
1221                                                         }\r
1222                                                         else\r
1223                                                         {\r
1224                                                                 mtCOVERAGE_TEST_MARKER();\r
1225                                                         }\r
1226                                                 }\r
1227                                                 else\r
1228                                                 {\r
1229                                                         mtCOVERAGE_TEST_MARKER();\r
1230                                                 }\r
1231                                         }\r
1232                                 }\r
1233                                 #else /* configUSE_QUEUE_SETS */\r
1234                                 {\r
1235                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1236                                         {\r
1237                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1238                                                 {\r
1239                                                         /* The task waiting has a higher priority so record that a\r
1240                                                         context switch is required. */\r
1241                                                         if( pxHigherPriorityTaskWoken != NULL )\r
1242                                                         {\r
1243                                                                 *pxHigherPriorityTaskWoken = pdTRUE;\r
1244                                                         }\r
1245                                                         else\r
1246                                                         {\r
1247                                                                 mtCOVERAGE_TEST_MARKER();\r
1248                                                         }\r
1249                                                 }\r
1250                                                 else\r
1251                                                 {\r
1252                                                         mtCOVERAGE_TEST_MARKER();\r
1253                                                 }\r
1254                                         }\r
1255                                         else\r
1256                                         {\r
1257                                                 mtCOVERAGE_TEST_MARKER();\r
1258                                         }\r
1259                                 }\r
1260                                 #endif /* configUSE_QUEUE_SETS */\r
1261                         }\r
1262                         else\r
1263                         {\r
1264                                 /* Increment the lock count so the task that unlocks the queue\r
1265                                 knows that data was posted while it was locked. */\r
1266                                 configASSERT( cTxLock != queueINT8_MAX);\r
1267 \r
1268                                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
1269                         }\r
1270 \r
1271                         xReturn = pdPASS;\r
1272                 }\r
1273                 else\r
1274                 {\r
1275                         traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );\r
1276                         xReturn = errQUEUE_FULL;\r
1277                 }\r
1278         }\r
1279         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1280 \r
1281         return xReturn;\r
1282 }\r
1283 /*-----------------------------------------------------------*/\r
1284 \r
1285 BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )\r
1286 {\r
1287 BaseType_t xEntryTimeSet = pdFALSE;\r
1288 TimeOut_t xTimeOut;\r
1289 Queue_t * const pxQueue = xQueue;\r
1290 \r
1291         /* Check the pointer is not NULL. */\r
1292         configASSERT( ( pxQueue ) );\r
1293 \r
1294         /* The buffer into which data is received can only be NULL if the data size\r
1295         is zero (so no data is copied into the buffer). */\r
1296         configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1297 \r
1298         /* Cannot block if the scheduler is suspended. */\r
1299         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1300         {\r
1301                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1302         }\r
1303         #endif\r
1304 \r
1305 \r
1306         /*lint -save -e904  This function relaxes the coding standard somewhat to\r
1307         allow return statements within the function itself.  This is done in the\r
1308         interest of execution time efficiency. */\r
1309         for( ;; )\r
1310         {\r
1311                 taskENTER_CRITICAL();\r
1312                 {\r
1313                         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1314 \r
1315                         /* Is there data in the queue now?  To be running the calling task\r
1316                         must be the highest priority task wanting to access the queue. */\r
1317                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1318                         {\r
1319                                 /* Data available, remove one item. */\r
1320                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1321                                 traceQUEUE_RECEIVE( pxQueue );\r
1322                                 pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;\r
1323 \r
1324                                 /* There is now space in the queue, were any tasks waiting to\r
1325                                 post to the queue?  If so, unblock the highest priority waiting\r
1326                                 task. */\r
1327                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1328                                 {\r
1329                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1330                                         {\r
1331                                                 queueYIELD_IF_USING_PREEMPTION();\r
1332                                         }\r
1333                                         else\r
1334                                         {\r
1335                                                 mtCOVERAGE_TEST_MARKER();\r
1336                                         }\r
1337                                 }\r
1338                                 else\r
1339                                 {\r
1340                                         mtCOVERAGE_TEST_MARKER();\r
1341                                 }\r
1342 \r
1343                                 taskEXIT_CRITICAL();\r
1344                                 return pdPASS;\r
1345                         }\r
1346                         else\r
1347                         {\r
1348                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1349                                 {\r
1350                                         /* The queue was empty and no block time is specified (or\r
1351                                         the block time has expired) so leave now. */\r
1352                                         taskEXIT_CRITICAL();\r
1353                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1354                                         return errQUEUE_EMPTY;\r
1355                                 }\r
1356                                 else if( xEntryTimeSet == pdFALSE )\r
1357                                 {\r
1358                                         /* The queue was empty and a block time was specified so\r
1359                                         configure the timeout structure. */\r
1360                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1361                                         xEntryTimeSet = pdTRUE;\r
1362                                 }\r
1363                                 else\r
1364                                 {\r
1365                                         /* Entry time was already set. */\r
1366                                         mtCOVERAGE_TEST_MARKER();\r
1367                                 }\r
1368                         }\r
1369                 }\r
1370                 taskEXIT_CRITICAL();\r
1371 \r
1372                 /* Interrupts and other tasks can send to and receive from the queue\r
1373                 now the critical section has been exited. */\r
1374 \r
1375                 vTaskSuspendAll();\r
1376                 prvLockQueue( pxQueue );\r
1377 \r
1378                 /* Update the timeout state to see if it has expired yet. */\r
1379                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1380                 {\r
1381                         /* The timeout has not expired.  If the queue is still empty place\r
1382                         the task on the list of tasks waiting to receive from the queue. */\r
1383                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1384                         {\r
1385                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1386                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1387                                 prvUnlockQueue( pxQueue );\r
1388                                 if( xTaskResumeAll() == pdFALSE )\r
1389                                 {\r
1390                                         portYIELD_WITHIN_API();\r
1391                                 }\r
1392                                 else\r
1393                                 {\r
1394                                         mtCOVERAGE_TEST_MARKER();\r
1395                                 }\r
1396                         }\r
1397                         else\r
1398                         {\r
1399                                 /* The queue contains data again.  Loop back to try and read the\r
1400                                 data. */\r
1401                                 prvUnlockQueue( pxQueue );\r
1402                                 ( void ) xTaskResumeAll();\r
1403                         }\r
1404                 }\r
1405                 else\r
1406                 {\r
1407                         /* Timed out.  If there is no data in the queue exit, otherwise loop\r
1408                         back and attempt to read the data. */\r
1409                         prvUnlockQueue( pxQueue );\r
1410                         ( void ) xTaskResumeAll();\r
1411 \r
1412                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1413                         {\r
1414                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1415                                 return errQUEUE_EMPTY;\r
1416                         }\r
1417                         else\r
1418                         {\r
1419                                 mtCOVERAGE_TEST_MARKER();\r
1420                         }\r
1421                 }\r
1422         } /*lint -restore */\r
1423 }\r
1424 /*-----------------------------------------------------------*/\r
1425 \r
1426 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )\r
1427 {\r
1428 BaseType_t xEntryTimeSet = pdFALSE;\r
1429 TimeOut_t xTimeOut;\r
1430 Queue_t * const pxQueue = xQueue;\r
1431 \r
1432 #if( configUSE_MUTEXES == 1 )\r
1433         BaseType_t xInheritanceOccurred = pdFALSE;\r
1434 #endif\r
1435 \r
1436         /* Check the queue pointer is not NULL. */\r
1437         configASSERT( ( pxQueue ) );\r
1438 \r
1439         /* Check this really is a semaphore, in which case the item size will be\r
1440         0. */\r
1441         configASSERT( pxQueue->uxItemSize == 0 );\r
1442 \r
1443         /* Cannot block if the scheduler is suspended. */\r
1444         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1445         {\r
1446                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1447         }\r
1448         #endif\r
1449 \r
1450 \r
1451         /*lint -save -e904 This function relaxes the coding standard somewhat to allow return\r
1452         statements within the function itself.  This is done in the interest\r
1453         of execution time efficiency. */\r
1454         for( ;; )\r
1455         {\r
1456                 taskENTER_CRITICAL();\r
1457                 {\r
1458                         /* Semaphores are queues with an item size of 0, and where the\r
1459                         number of messages in the queue is the semaphore's count value. */\r
1460                         const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;\r
1461 \r
1462                         /* Is there data in the queue now?  To be running the calling task\r
1463                         must be the highest priority task wanting to access the queue. */\r
1464                         if( uxSemaphoreCount > ( UBaseType_t ) 0 )\r
1465                         {\r
1466                                 traceQUEUE_RECEIVE( pxQueue );\r
1467 \r
1468                                 /* Semaphores are queues with a data size of zero and where the\r
1469                                 messages waiting is the semaphore's count.  Reduce the count. */\r
1470                                 pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;\r
1471 \r
1472                                 #if ( configUSE_MUTEXES == 1 )\r
1473                                 {\r
1474                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1475                                         {\r
1476                                                 /* Record the information required to implement\r
1477                                                 priority inheritance should it become necessary. */\r
1478                                                 pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();\r
1479                                         }\r
1480                                         else\r
1481                                         {\r
1482                                                 mtCOVERAGE_TEST_MARKER();\r
1483                                         }\r
1484                                 }\r
1485                                 #endif /* configUSE_MUTEXES */\r
1486 \r
1487                                 /* Check to see if other tasks are blocked waiting to give the\r
1488                                 semaphore, and if so, unblock the highest priority such task. */\r
1489                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1490                                 {\r
1491                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1492                                         {\r
1493                                                 queueYIELD_IF_USING_PREEMPTION();\r
1494                                         }\r
1495                                         else\r
1496                                         {\r
1497                                                 mtCOVERAGE_TEST_MARKER();\r
1498                                         }\r
1499                                 }\r
1500                                 else\r
1501                                 {\r
1502                                         mtCOVERAGE_TEST_MARKER();\r
1503                                 }\r
1504 \r
1505                                 taskEXIT_CRITICAL();\r
1506                                 return pdPASS;\r
1507                         }\r
1508                         else\r
1509                         {\r
1510                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1511                                 {\r
1512                                         /* For inheritance to have occurred there must have been an\r
1513                                         initial timeout, and an adjusted timeout cannot become 0, as\r
1514                                         if it were 0 the function would have exited. */\r
1515                                         #if( configUSE_MUTEXES == 1 )\r
1516                                         {\r
1517                                                 configASSERT( xInheritanceOccurred == pdFALSE );\r
1518                                         }\r
1519                                         #endif /* configUSE_MUTEXES */\r
1520 \r
1521                                         /* The semaphore count was 0 and no block time is specified\r
1522                                         (or the block time has expired) so exit now. */\r
1523                                         taskEXIT_CRITICAL();\r
1524                                         traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1525                                         return errQUEUE_EMPTY;\r
1526                                 }\r
1527                                 else if( xEntryTimeSet == pdFALSE )\r
1528                                 {\r
1529                                         /* The semaphore count was 0 and a block time was specified\r
1530                                         so configure the timeout structure ready to block. */\r
1531                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1532                                         xEntryTimeSet = pdTRUE;\r
1533                                 }\r
1534                                 else\r
1535                                 {\r
1536                                         /* Entry time was already set. */\r
1537                                         mtCOVERAGE_TEST_MARKER();\r
1538                                 }\r
1539                         }\r
1540                 }\r
1541                 taskEXIT_CRITICAL();\r
1542 \r
1543                 /* Interrupts and other tasks can give to and take from the semaphore\r
1544                 now the critical section has been exited. */\r
1545 \r
1546                 vTaskSuspendAll();\r
1547                 prvLockQueue( pxQueue );\r
1548 \r
1549                 /* Update the timeout state to see if it has expired yet. */\r
1550                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1551                 {\r
1552                         /* A block time is specified and not expired.  If the semaphore\r
1553                         count is 0 then enter the Blocked state to wait for a semaphore to\r
1554                         become available.  As semaphores are implemented with queues the\r
1555                         queue being empty is equivalent to the semaphore count being 0. */\r
1556                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1557                         {\r
1558                                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );\r
1559 \r
1560                                 #if ( configUSE_MUTEXES == 1 )\r
1561                                 {\r
1562                                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
1563                                         {\r
1564                                                 taskENTER_CRITICAL();\r
1565                                                 {\r
1566                                                         xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );\r
1567                                                 }\r
1568                                                 taskEXIT_CRITICAL();\r
1569                                         }\r
1570                                         else\r
1571                                         {\r
1572                                                 mtCOVERAGE_TEST_MARKER();\r
1573                                         }\r
1574                                 }\r
1575                                 #endif\r
1576 \r
1577                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1578                                 prvUnlockQueue( pxQueue );\r
1579                                 if( xTaskResumeAll() == pdFALSE )\r
1580                                 {\r
1581                                         portYIELD_WITHIN_API();\r
1582                                 }\r
1583                                 else\r
1584                                 {\r
1585                                         mtCOVERAGE_TEST_MARKER();\r
1586                                 }\r
1587                         }\r
1588                         else\r
1589                         {\r
1590                                 /* There was no timeout and the semaphore count was not 0, so\r
1591                                 attempt to take the semaphore again. */\r
1592                                 prvUnlockQueue( pxQueue );\r
1593                                 ( void ) xTaskResumeAll();\r
1594                         }\r
1595                 }\r
1596                 else\r
1597                 {\r
1598                         /* Timed out. */\r
1599                         prvUnlockQueue( pxQueue );\r
1600                         ( void ) xTaskResumeAll();\r
1601 \r
1602                         /* If the semaphore count is 0 exit now as the timeout has\r
1603                         expired.  Otherwise return to attempt to take the semaphore that is\r
1604                         known to be available.  As semaphores are implemented by queues the\r
1605                         queue being empty is equivalent to the semaphore count being 0. */\r
1606                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1607                         {\r
1608                                 #if ( configUSE_MUTEXES == 1 )\r
1609                                 {\r
1610                                         /* xInheritanceOccurred could only have be set if\r
1611                                         pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to\r
1612                                         test the mutex type again to check it is actually a mutex. */\r
1613                                         if( xInheritanceOccurred != pdFALSE )\r
1614                                         {\r
1615                                                 taskENTER_CRITICAL();\r
1616                                                 {\r
1617                                                         UBaseType_t uxHighestWaitingPriority;\r
1618 \r
1619                                                         /* This task blocking on the mutex caused another\r
1620                                                         task to inherit this task's priority.  Now this task\r
1621                                                         has timed out the priority should be disinherited\r
1622                                                         again, but only as low as the next highest priority\r
1623                                                         task that is waiting for the same mutex. */\r
1624                                                         uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );\r
1625                                                         vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );\r
1626                                                 }\r
1627                                                 taskEXIT_CRITICAL();\r
1628                                         }\r
1629                                 }\r
1630                                 #endif /* configUSE_MUTEXES */\r
1631 \r
1632                                 traceQUEUE_RECEIVE_FAILED( pxQueue );\r
1633                                 return errQUEUE_EMPTY;\r
1634                         }\r
1635                         else\r
1636                         {\r
1637                                 mtCOVERAGE_TEST_MARKER();\r
1638                         }\r
1639                 }\r
1640         } /*lint -restore */\r
1641 }\r
1642 /*-----------------------------------------------------------*/\r
1643 \r
1644 BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )\r
1645 {\r
1646 BaseType_t xEntryTimeSet = pdFALSE;\r
1647 TimeOut_t xTimeOut;\r
1648 int8_t *pcOriginalReadPosition;\r
1649 Queue_t * const pxQueue = xQueue;\r
1650 \r
1651         /* Check the pointer is not NULL. */\r
1652         configASSERT( ( pxQueue ) );\r
1653 \r
1654         /* The buffer into which data is received can only be NULL if the data size\r
1655         is zero (so no data is copied into the buffer. */\r
1656         configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1657 \r
1658         /* Cannot block if the scheduler is suspended. */\r
1659         #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )\r
1660         {\r
1661                 configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );\r
1662         }\r
1663         #endif\r
1664 \r
1665 \r
1666         /*lint -save -e904  This function relaxes the coding standard somewhat to\r
1667         allow return statements within the function itself.  This is done in the\r
1668         interest of execution time efficiency. */\r
1669         for( ;; )\r
1670         {\r
1671                 taskENTER_CRITICAL();\r
1672                 {\r
1673                         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1674 \r
1675                         /* Is there data in the queue now?  To be running the calling task\r
1676                         must be the highest priority task wanting to access the queue. */\r
1677                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1678                         {\r
1679                                 /* Remember the read position so it can be reset after the data\r
1680                                 is read from the queue as this function is only peeking the\r
1681                                 data, not removing it. */\r
1682                                 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;\r
1683 \r
1684                                 prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1685                                 traceQUEUE_PEEK( pxQueue );\r
1686 \r
1687                                 /* The data is not being removed, so reset the read pointer. */\r
1688                                 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;\r
1689 \r
1690                                 /* The data is being left in the queue, so see if there are\r
1691                                 any other tasks waiting for the data. */\r
1692                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
1693                                 {\r
1694                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
1695                                         {\r
1696                                                 /* The task waiting has a higher priority than this task. */\r
1697                                                 queueYIELD_IF_USING_PREEMPTION();\r
1698                                         }\r
1699                                         else\r
1700                                         {\r
1701                                                 mtCOVERAGE_TEST_MARKER();\r
1702                                         }\r
1703                                 }\r
1704                                 else\r
1705                                 {\r
1706                                         mtCOVERAGE_TEST_MARKER();\r
1707                                 }\r
1708 \r
1709                                 taskEXIT_CRITICAL();\r
1710                                 return pdPASS;\r
1711                         }\r
1712                         else\r
1713                         {\r
1714                                 if( xTicksToWait == ( TickType_t ) 0 )\r
1715                                 {\r
1716                                         /* The queue was empty and no block time is specified (or\r
1717                                         the block time has expired) so leave now. */\r
1718                                         taskEXIT_CRITICAL();\r
1719                                         traceQUEUE_PEEK_FAILED( pxQueue );\r
1720                                         return errQUEUE_EMPTY;\r
1721                                 }\r
1722                                 else if( xEntryTimeSet == pdFALSE )\r
1723                                 {\r
1724                                         /* The queue was empty and a block time was specified so\r
1725                                         configure the timeout structure ready to enter the blocked\r
1726                                         state. */\r
1727                                         vTaskInternalSetTimeOutState( &xTimeOut );\r
1728                                         xEntryTimeSet = pdTRUE;\r
1729                                 }\r
1730                                 else\r
1731                                 {\r
1732                                         /* Entry time was already set. */\r
1733                                         mtCOVERAGE_TEST_MARKER();\r
1734                                 }\r
1735                         }\r
1736                 }\r
1737                 taskEXIT_CRITICAL();\r
1738 \r
1739                 /* Interrupts and other tasks can send to and receive from the queue\r
1740                 now the critical section has been exited. */\r
1741 \r
1742                 vTaskSuspendAll();\r
1743                 prvLockQueue( pxQueue );\r
1744 \r
1745                 /* Update the timeout state to see if it has expired yet. */\r
1746                 if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )\r
1747                 {\r
1748                         /* Timeout has not expired yet, check to see if there is data in the\r
1749                         queue now, and if not enter the Blocked state to wait for data. */\r
1750                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1751                         {\r
1752                                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );\r
1753                                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );\r
1754                                 prvUnlockQueue( pxQueue );\r
1755                                 if( xTaskResumeAll() == pdFALSE )\r
1756                                 {\r
1757                                         portYIELD_WITHIN_API();\r
1758                                 }\r
1759                                 else\r
1760                                 {\r
1761                                         mtCOVERAGE_TEST_MARKER();\r
1762                                 }\r
1763                         }\r
1764                         else\r
1765                         {\r
1766                                 /* There is data in the queue now, so don't enter the blocked\r
1767                                 state, instead return to try and obtain the data. */\r
1768                                 prvUnlockQueue( pxQueue );\r
1769                                 ( void ) xTaskResumeAll();\r
1770                         }\r
1771                 }\r
1772                 else\r
1773                 {\r
1774                         /* The timeout has expired.  If there is still no data in the queue\r
1775                         exit, otherwise go back and try to read the data again. */\r
1776                         prvUnlockQueue( pxQueue );\r
1777                         ( void ) xTaskResumeAll();\r
1778 \r
1779                         if( prvIsQueueEmpty( pxQueue ) != pdFALSE )\r
1780                         {\r
1781                                 traceQUEUE_PEEK_FAILED( pxQueue );\r
1782                                 return errQUEUE_EMPTY;\r
1783                         }\r
1784                         else\r
1785                         {\r
1786                                 mtCOVERAGE_TEST_MARKER();\r
1787                         }\r
1788                 }\r
1789         } /*lint -restore */\r
1790 }\r
1791 /*-----------------------------------------------------------*/\r
1792 \r
1793 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )\r
1794 {\r
1795 BaseType_t xReturn;\r
1796 UBaseType_t uxSavedInterruptStatus;\r
1797 Queue_t * const pxQueue = xQueue;\r
1798 \r
1799         configASSERT( pxQueue );\r
1800         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1801 \r
1802         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1803         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1804         above the maximum system call priority are kept permanently enabled, even\r
1805         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1806         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1807         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1808         failure if a FreeRTOS API function is called from an interrupt that has been\r
1809         assigned a priority above the configured maximum system call priority.\r
1810         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1811         that have been assigned a priority at or (logically) below the maximum\r
1812         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1813         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1814         More information (albeit Cortex-M specific) is provided on the following\r
1815         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1816         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1817 \r
1818         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1819         {\r
1820                 const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
1821 \r
1822                 /* Cannot block in an ISR, so check there is data available. */\r
1823                 if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1824                 {\r
1825                         const int8_t cRxLock = pxQueue->cRxLock;\r
1826 \r
1827                         traceQUEUE_RECEIVE_FROM_ISR( pxQueue );\r
1828 \r
1829                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1830                         pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;\r
1831 \r
1832                         /* If the queue is locked the event list will not be modified.\r
1833                         Instead update the lock count so the task that unlocks the queue\r
1834                         will know that an ISR has removed data while the queue was\r
1835                         locked. */\r
1836                         if( cRxLock == queueUNLOCKED )\r
1837                         {\r
1838                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
1839                                 {\r
1840                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
1841                                         {\r
1842                                                 /* The task waiting has a higher priority than us so\r
1843                                                 force a context switch. */\r
1844                                                 if( pxHigherPriorityTaskWoken != NULL )\r
1845                                                 {\r
1846                                                         *pxHigherPriorityTaskWoken = pdTRUE;\r
1847                                                 }\r
1848                                                 else\r
1849                                                 {\r
1850                                                         mtCOVERAGE_TEST_MARKER();\r
1851                                                 }\r
1852                                         }\r
1853                                         else\r
1854                                         {\r
1855                                                 mtCOVERAGE_TEST_MARKER();\r
1856                                         }\r
1857                                 }\r
1858                                 else\r
1859                                 {\r
1860                                         mtCOVERAGE_TEST_MARKER();\r
1861                                 }\r
1862                         }\r
1863                         else\r
1864                         {\r
1865                                 /* Increment the lock count so the task that unlocks the queue\r
1866                                 knows that data was removed while it was locked. */\r
1867                                 configASSERT( cRxLock != queueINT8_MAX);\r
1868 \r
1869                                 pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );\r
1870                         }\r
1871 \r
1872                         xReturn = pdPASS;\r
1873                 }\r
1874                 else\r
1875                 {\r
1876                         xReturn = pdFAIL;\r
1877                         traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );\r
1878                 }\r
1879         }\r
1880         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1881 \r
1882         return xReturn;\r
1883 }\r
1884 /*-----------------------------------------------------------*/\r
1885 \r
1886 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,  void * const pvBuffer )\r
1887 {\r
1888 BaseType_t xReturn;\r
1889 UBaseType_t uxSavedInterruptStatus;\r
1890 int8_t *pcOriginalReadPosition;\r
1891 Queue_t * const pxQueue = xQueue;\r
1892 \r
1893         configASSERT( pxQueue );\r
1894         configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );\r
1895         configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */\r
1896 \r
1897         /* RTOS ports that support interrupt nesting have the concept of a maximum\r
1898         system call (or maximum API call) interrupt priority.  Interrupts that are\r
1899         above the maximum system call priority are kept permanently enabled, even\r
1900         when the RTOS kernel is in a critical section, but cannot make any calls to\r
1901         FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h\r
1902         then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion\r
1903         failure if a FreeRTOS API function is called from an interrupt that has been\r
1904         assigned a priority above the configured maximum system call priority.\r
1905         Only FreeRTOS functions that end in FromISR can be called from interrupts\r
1906         that have been assigned a priority at or (logically) below the maximum\r
1907         system call     interrupt priority.  FreeRTOS maintains a separate interrupt\r
1908         safe API to ensure interrupt entry is as fast and as simple as possible.\r
1909         More information (albeit Cortex-M specific) is provided on the following\r
1910         link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */\r
1911         portASSERT_IF_INTERRUPT_PRIORITY_INVALID();\r
1912 \r
1913         uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();\r
1914         {\r
1915                 /* Cannot block in an ISR, so check there is data available. */\r
1916                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
1917                 {\r
1918                         traceQUEUE_PEEK_FROM_ISR( pxQueue );\r
1919 \r
1920                         /* Remember the read position so it can be reset as nothing is\r
1921                         actually being removed from the queue. */\r
1922                         pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;\r
1923                         prvCopyDataFromQueue( pxQueue, pvBuffer );\r
1924                         pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;\r
1925 \r
1926                         xReturn = pdPASS;\r
1927                 }\r
1928                 else\r
1929                 {\r
1930                         xReturn = pdFAIL;\r
1931                         traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );\r
1932                 }\r
1933         }\r
1934         portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );\r
1935 \r
1936         return xReturn;\r
1937 }\r
1938 /*-----------------------------------------------------------*/\r
1939 \r
1940 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )\r
1941 {\r
1942 UBaseType_t uxReturn;\r
1943 \r
1944         configASSERT( xQueue );\r
1945 \r
1946         taskENTER_CRITICAL();\r
1947         {\r
1948                 uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;\r
1949         }\r
1950         taskEXIT_CRITICAL();\r
1951 \r
1952         return uxReturn;\r
1953 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1954 /*-----------------------------------------------------------*/\r
1955 \r
1956 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )\r
1957 {\r
1958 UBaseType_t uxReturn;\r
1959 Queue_t * const pxQueue = xQueue;\r
1960 \r
1961         configASSERT( pxQueue );\r
1962 \r
1963         taskENTER_CRITICAL();\r
1964         {\r
1965                 uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;\r
1966         }\r
1967         taskEXIT_CRITICAL();\r
1968 \r
1969         return uxReturn;\r
1970 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1971 /*-----------------------------------------------------------*/\r
1972 \r
1973 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )\r
1974 {\r
1975 UBaseType_t uxReturn;\r
1976 Queue_t * const pxQueue = xQueue;\r
1977 \r
1978         configASSERT( pxQueue );\r
1979         uxReturn = pxQueue->uxMessagesWaiting;\r
1980 \r
1981         return uxReturn;\r
1982 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */\r
1983 /*-----------------------------------------------------------*/\r
1984 \r
1985 void vQueueDelete( QueueHandle_t xQueue )\r
1986 {\r
1987 Queue_t * const pxQueue = xQueue;\r
1988 \r
1989         configASSERT( pxQueue );\r
1990         traceQUEUE_DELETE( pxQueue );\r
1991 \r
1992         #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
1993         {\r
1994                 vQueueUnregisterQueue( pxQueue );\r
1995         }\r
1996         #endif\r
1997 \r
1998         #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )\r
1999         {\r
2000                 /* The queue can only have been allocated dynamically - free it\r
2001                 again. */\r
2002                 vPortFree( pxQueue );\r
2003         }\r
2004         #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )\r
2005         {\r
2006                 /* The queue could have been allocated statically or dynamically, so\r
2007                 check before attempting to free the memory. */\r
2008                 if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )\r
2009                 {\r
2010                         vPortFree( pxQueue );\r
2011                 }\r
2012                 else\r
2013                 {\r
2014                         mtCOVERAGE_TEST_MARKER();\r
2015                 }\r
2016         }\r
2017         #else\r
2018         {\r
2019                 /* The queue must have been statically allocated, so is not going to be\r
2020                 deleted.  Avoid compiler warnings about the unused parameter. */\r
2021                 ( void ) pxQueue;\r
2022         }\r
2023         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */\r
2024 }\r
2025 /*-----------------------------------------------------------*/\r
2026 \r
2027 #if ( configUSE_TRACE_FACILITY == 1 )\r
2028 \r
2029         UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )\r
2030         {\r
2031                 return ( ( Queue_t * ) xQueue )->uxQueueNumber;\r
2032         }\r
2033 \r
2034 #endif /* configUSE_TRACE_FACILITY */\r
2035 /*-----------------------------------------------------------*/\r
2036 \r
2037 #if ( configUSE_TRACE_FACILITY == 1 )\r
2038 \r
2039         void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )\r
2040         {\r
2041                 ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;\r
2042         }\r
2043 \r
2044 #endif /* configUSE_TRACE_FACILITY */\r
2045 /*-----------------------------------------------------------*/\r
2046 \r
2047 #if ( configUSE_TRACE_FACILITY == 1 )\r
2048 \r
2049         uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )\r
2050         {\r
2051                 return ( ( Queue_t * ) xQueue )->ucQueueType;\r
2052         }\r
2053 \r
2054 #endif /* configUSE_TRACE_FACILITY */\r
2055 /*-----------------------------------------------------------*/\r
2056 \r
2057 #if( configUSE_MUTEXES == 1 )\r
2058 \r
2059         static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )\r
2060         {\r
2061         UBaseType_t uxHighestPriorityOfWaitingTasks;\r
2062 \r
2063                 /* If a task waiting for a mutex causes the mutex holder to inherit a\r
2064                 priority, but the waiting task times out, then the holder should\r
2065                 disinherit the priority - but only down to the highest priority of any\r
2066                 other tasks that are waiting for the same mutex.  For this purpose,\r
2067                 return the priority of the highest priority task that is waiting for the\r
2068                 mutex. */\r
2069                 if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )\r
2070                 {\r
2071                         uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );\r
2072                 }\r
2073                 else\r
2074                 {\r
2075                         uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;\r
2076                 }\r
2077 \r
2078                 return uxHighestPriorityOfWaitingTasks;\r
2079         }\r
2080 \r
2081 #endif /* configUSE_MUTEXES */\r
2082 /*-----------------------------------------------------------*/\r
2083 \r
2084 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )\r
2085 {\r
2086 BaseType_t xReturn = pdFALSE;\r
2087 UBaseType_t uxMessagesWaiting;\r
2088 \r
2089         /* This function is called from a critical section. */\r
2090 \r
2091         uxMessagesWaiting = pxQueue->uxMessagesWaiting;\r
2092 \r
2093         if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )\r
2094         {\r
2095                 #if ( configUSE_MUTEXES == 1 )\r
2096                 {\r
2097                         if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )\r
2098                         {\r
2099                                 /* The mutex is no longer being held. */\r
2100                                 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );\r
2101                                 pxQueue->u.xSemaphore.xMutexHolder = NULL;\r
2102                         }\r
2103                         else\r
2104                         {\r
2105                                 mtCOVERAGE_TEST_MARKER();\r
2106                         }\r
2107                 }\r
2108                 #endif /* configUSE_MUTEXES */\r
2109         }\r
2110         else if( xPosition == queueSEND_TO_BACK )\r
2111         {\r
2112                 ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */\r
2113                 pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */\r
2114                 if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
2115                 {\r
2116                         pxQueue->pcWriteTo = pxQueue->pcHead;\r
2117                 }\r
2118                 else\r
2119                 {\r
2120                         mtCOVERAGE_TEST_MARKER();\r
2121                 }\r
2122         }\r
2123         else\r
2124         {\r
2125                 ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */\r
2126                 pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;\r
2127                 if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */\r
2128                 {\r
2129                         pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );\r
2130                 }\r
2131                 else\r
2132                 {\r
2133                         mtCOVERAGE_TEST_MARKER();\r
2134                 }\r
2135 \r
2136                 if( xPosition == queueOVERWRITE )\r
2137                 {\r
2138                         if( uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2139                         {\r
2140                                 /* An item is not being added but overwritten, so subtract\r
2141                                 one from the recorded number of items in the queue so when\r
2142                                 one is added again below the number of recorded items remains\r
2143                                 correct. */\r
2144                                 --uxMessagesWaiting;\r
2145                         }\r
2146                         else\r
2147                         {\r
2148                                 mtCOVERAGE_TEST_MARKER();\r
2149                         }\r
2150                 }\r
2151                 else\r
2152                 {\r
2153                         mtCOVERAGE_TEST_MARKER();\r
2154                 }\r
2155         }\r
2156 \r
2157         pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;\r
2158 \r
2159         return xReturn;\r
2160 }\r
2161 /*-----------------------------------------------------------*/\r
2162 \r
2163 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )\r
2164 {\r
2165         if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )\r
2166         {\r
2167                 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */\r
2168                 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */\r
2169                 {\r
2170                         pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;\r
2171                 }\r
2172                 else\r
2173                 {\r
2174                         mtCOVERAGE_TEST_MARKER();\r
2175                 }\r
2176                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */\r
2177         }\r
2178 }\r
2179 /*-----------------------------------------------------------*/\r
2180 \r
2181 static void prvUnlockQueue( Queue_t * const pxQueue )\r
2182 {\r
2183         /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */\r
2184 \r
2185         /* The lock counts contains the number of extra data items placed or\r
2186         removed from the queue while the queue was locked.  When a queue is\r
2187         locked items can be added or removed, but the event lists cannot be\r
2188         updated. */\r
2189         taskENTER_CRITICAL();\r
2190         {\r
2191                 int8_t cTxLock = pxQueue->cTxLock;\r
2192 \r
2193                 /* See if data was added to the queue while it was locked. */\r
2194                 while( cTxLock > queueLOCKED_UNMODIFIED )\r
2195                 {\r
2196                         /* Data was posted while the queue was locked.  Are any tasks\r
2197                         blocked waiting for data to become available? */\r
2198                         #if ( configUSE_QUEUE_SETS == 1 )\r
2199                         {\r
2200                                 if( pxQueue->pxQueueSetContainer != NULL )\r
2201                                 {\r
2202                                         if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )\r
2203                                         {\r
2204                                                 /* The queue is a member of a queue set, and posting to\r
2205                                                 the queue set caused a higher priority task to unblock.\r
2206                                                 A context switch is required. */\r
2207                                                 vTaskMissedYield();\r
2208                                         }\r
2209                                         else\r
2210                                         {\r
2211                                                 mtCOVERAGE_TEST_MARKER();\r
2212                                         }\r
2213                                 }\r
2214                                 else\r
2215                                 {\r
2216                                         /* Tasks that are removed from the event list will get\r
2217                                         added to the pending ready list as the scheduler is still\r
2218                                         suspended. */\r
2219                                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2220                                         {\r
2221                                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2222                                                 {\r
2223                                                         /* The task waiting has a higher priority so record that a\r
2224                                                         context switch is required. */\r
2225                                                         vTaskMissedYield();\r
2226                                                 }\r
2227                                                 else\r
2228                                                 {\r
2229                                                         mtCOVERAGE_TEST_MARKER();\r
2230                                                 }\r
2231                                         }\r
2232                                         else\r
2233                                         {\r
2234                                                 break;\r
2235                                         }\r
2236                                 }\r
2237                         }\r
2238                         #else /* configUSE_QUEUE_SETS */\r
2239                         {\r
2240                                 /* Tasks that are removed from the event list will get added to\r
2241                                 the pending ready list as the scheduler is still suspended. */\r
2242                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2243                                 {\r
2244                                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2245                                         {\r
2246                                                 /* The task waiting has a higher priority so record that\r
2247                                                 a context switch is required. */\r
2248                                                 vTaskMissedYield();\r
2249                                         }\r
2250                                         else\r
2251                                         {\r
2252                                                 mtCOVERAGE_TEST_MARKER();\r
2253                                         }\r
2254                                 }\r
2255                                 else\r
2256                                 {\r
2257                                         break;\r
2258                                 }\r
2259                         }\r
2260                         #endif /* configUSE_QUEUE_SETS */\r
2261 \r
2262                         --cTxLock;\r
2263                 }\r
2264 \r
2265                 pxQueue->cTxLock = queueUNLOCKED;\r
2266         }\r
2267         taskEXIT_CRITICAL();\r
2268 \r
2269         /* Do the same for the Rx lock. */\r
2270         taskENTER_CRITICAL();\r
2271         {\r
2272                 int8_t cRxLock = pxQueue->cRxLock;\r
2273 \r
2274                 while( cRxLock > queueLOCKED_UNMODIFIED )\r
2275                 {\r
2276                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2277                         {\r
2278                                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2279                                 {\r
2280                                         vTaskMissedYield();\r
2281                                 }\r
2282                                 else\r
2283                                 {\r
2284                                         mtCOVERAGE_TEST_MARKER();\r
2285                                 }\r
2286 \r
2287                                 --cRxLock;\r
2288                         }\r
2289                         else\r
2290                         {\r
2291                                 break;\r
2292                         }\r
2293                 }\r
2294 \r
2295                 pxQueue->cRxLock = queueUNLOCKED;\r
2296         }\r
2297         taskEXIT_CRITICAL();\r
2298 }\r
2299 /*-----------------------------------------------------------*/\r
2300 \r
2301 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )\r
2302 {\r
2303 BaseType_t xReturn;\r
2304 \r
2305         taskENTER_CRITICAL();\r
2306         {\r
2307                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t )  0 )\r
2308                 {\r
2309                         xReturn = pdTRUE;\r
2310                 }\r
2311                 else\r
2312                 {\r
2313                         xReturn = pdFALSE;\r
2314                 }\r
2315         }\r
2316         taskEXIT_CRITICAL();\r
2317 \r
2318         return xReturn;\r
2319 }\r
2320 /*-----------------------------------------------------------*/\r
2321 \r
2322 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )\r
2323 {\r
2324 BaseType_t xReturn;\r
2325 Queue_t * const pxQueue = xQueue;\r
2326 \r
2327         configASSERT( pxQueue );\r
2328         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2329         {\r
2330                 xReturn = pdTRUE;\r
2331         }\r
2332         else\r
2333         {\r
2334                 xReturn = pdFALSE;\r
2335         }\r
2336 \r
2337         return xReturn;\r
2338 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2339 /*-----------------------------------------------------------*/\r
2340 \r
2341 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )\r
2342 {\r
2343 BaseType_t xReturn;\r
2344 \r
2345         taskENTER_CRITICAL();\r
2346         {\r
2347                 if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2348                 {\r
2349                         xReturn = pdTRUE;\r
2350                 }\r
2351                 else\r
2352                 {\r
2353                         xReturn = pdFALSE;\r
2354                 }\r
2355         }\r
2356         taskEXIT_CRITICAL();\r
2357 \r
2358         return xReturn;\r
2359 }\r
2360 /*-----------------------------------------------------------*/\r
2361 \r
2362 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )\r
2363 {\r
2364 BaseType_t xReturn;\r
2365 Queue_t * const pxQueue = xQueue;\r
2366 \r
2367         configASSERT( pxQueue );\r
2368         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )\r
2369         {\r
2370                 xReturn = pdTRUE;\r
2371         }\r
2372         else\r
2373         {\r
2374                 xReturn = pdFALSE;\r
2375         }\r
2376 \r
2377         return xReturn;\r
2378 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2379 /*-----------------------------------------------------------*/\r
2380 \r
2381 #if ( configUSE_CO_ROUTINES == 1 )\r
2382 \r
2383         BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )\r
2384         {\r
2385         BaseType_t xReturn;\r
2386         Queue_t * const pxQueue = xQueue;\r
2387 \r
2388                 /* If the queue is already full we may have to block.  A critical section\r
2389                 is required to prevent an interrupt removing something from the queue\r
2390                 between the check to see if the queue is full and blocking on the queue. */\r
2391                 portDISABLE_INTERRUPTS();\r
2392                 {\r
2393                         if( prvIsQueueFull( pxQueue ) != pdFALSE )\r
2394                         {\r
2395                                 /* The queue is full - do we want to block or just leave without\r
2396                                 posting? */\r
2397                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2398                                 {\r
2399                                         /* As this is called from a coroutine we cannot block directly, but\r
2400                                         return indicating that we need to block. */\r
2401                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );\r
2402                                         portENABLE_INTERRUPTS();\r
2403                                         return errQUEUE_BLOCKED;\r
2404                                 }\r
2405                                 else\r
2406                                 {\r
2407                                         portENABLE_INTERRUPTS();\r
2408                                         return errQUEUE_FULL;\r
2409                                 }\r
2410                         }\r
2411                 }\r
2412                 portENABLE_INTERRUPTS();\r
2413 \r
2414                 portDISABLE_INTERRUPTS();\r
2415                 {\r
2416                         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2417                         {\r
2418                                 /* There is room in the queue, copy the data into the queue. */\r
2419                                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2420                                 xReturn = pdPASS;\r
2421 \r
2422                                 /* Were any co-routines waiting for data to become available? */\r
2423                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2424                                 {\r
2425                                         /* In this instance the co-routine could be placed directly\r
2426                                         into the ready list as we are within a critical section.\r
2427                                         Instead the same pending ready list mechanism is used as if\r
2428                                         the event were caused from within an interrupt. */\r
2429                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2430                                         {\r
2431                                                 /* The co-routine waiting has a higher priority so record\r
2432                                                 that a yield might be appropriate. */\r
2433                                                 xReturn = errQUEUE_YIELD;\r
2434                                         }\r
2435                                         else\r
2436                                         {\r
2437                                                 mtCOVERAGE_TEST_MARKER();\r
2438                                         }\r
2439                                 }\r
2440                                 else\r
2441                                 {\r
2442                                         mtCOVERAGE_TEST_MARKER();\r
2443                                 }\r
2444                         }\r
2445                         else\r
2446                         {\r
2447                                 xReturn = errQUEUE_FULL;\r
2448                         }\r
2449                 }\r
2450                 portENABLE_INTERRUPTS();\r
2451 \r
2452                 return xReturn;\r
2453         }\r
2454 \r
2455 #endif /* configUSE_CO_ROUTINES */\r
2456 /*-----------------------------------------------------------*/\r
2457 \r
2458 #if ( configUSE_CO_ROUTINES == 1 )\r
2459 \r
2460         BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )\r
2461         {\r
2462         BaseType_t xReturn;\r
2463         Queue_t * const pxQueue = xQueue;\r
2464 \r
2465                 /* If the queue is already empty we may have to block.  A critical section\r
2466                 is required to prevent an interrupt adding something to the queue\r
2467                 between the check to see if the queue is empty and blocking on the queue. */\r
2468                 portDISABLE_INTERRUPTS();\r
2469                 {\r
2470                         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )\r
2471                         {\r
2472                                 /* There are no messages in the queue, do we want to block or just\r
2473                                 leave with nothing? */\r
2474                                 if( xTicksToWait > ( TickType_t ) 0 )\r
2475                                 {\r
2476                                         /* As this is a co-routine we cannot block directly, but return\r
2477                                         indicating that we need to block. */\r
2478                                         vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );\r
2479                                         portENABLE_INTERRUPTS();\r
2480                                         return errQUEUE_BLOCKED;\r
2481                                 }\r
2482                                 else\r
2483                                 {\r
2484                                         portENABLE_INTERRUPTS();\r
2485                                         return errQUEUE_FULL;\r
2486                                 }\r
2487                         }\r
2488                         else\r
2489                         {\r
2490                                 mtCOVERAGE_TEST_MARKER();\r
2491                         }\r
2492                 }\r
2493                 portENABLE_INTERRUPTS();\r
2494 \r
2495                 portDISABLE_INTERRUPTS();\r
2496                 {\r
2497                         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2498                         {\r
2499                                 /* Data is available from the queue. */\r
2500                                 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;\r
2501                                 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )\r
2502                                 {\r
2503                                         pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;\r
2504                                 }\r
2505                                 else\r
2506                                 {\r
2507                                         mtCOVERAGE_TEST_MARKER();\r
2508                                 }\r
2509                                 --( pxQueue->uxMessagesWaiting );\r
2510                                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2511 \r
2512                                 xReturn = pdPASS;\r
2513 \r
2514                                 /* Were any co-routines waiting for space to become available? */\r
2515                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2516                                 {\r
2517                                         /* In this instance the co-routine could be placed directly\r
2518                                         into the ready list as we are within a critical section.\r
2519                                         Instead the same pending ready list mechanism is used as if\r
2520                                         the event were caused from within an interrupt. */\r
2521                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2522                                         {\r
2523                                                 xReturn = errQUEUE_YIELD;\r
2524                                         }\r
2525                                         else\r
2526                                         {\r
2527                                                 mtCOVERAGE_TEST_MARKER();\r
2528                                         }\r
2529                                 }\r
2530                                 else\r
2531                                 {\r
2532                                         mtCOVERAGE_TEST_MARKER();\r
2533                                 }\r
2534                         }\r
2535                         else\r
2536                         {\r
2537                                 xReturn = pdFAIL;\r
2538                         }\r
2539                 }\r
2540                 portENABLE_INTERRUPTS();\r
2541 \r
2542                 return xReturn;\r
2543         }\r
2544 \r
2545 #endif /* configUSE_CO_ROUTINES */\r
2546 /*-----------------------------------------------------------*/\r
2547 \r
2548 #if ( configUSE_CO_ROUTINES == 1 )\r
2549 \r
2550         BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )\r
2551         {\r
2552         Queue_t * const pxQueue = xQueue;\r
2553 \r
2554                 /* Cannot block within an ISR so if there is no space on the queue then\r
2555                 exit without doing anything. */\r
2556                 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )\r
2557                 {\r
2558                         prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );\r
2559 \r
2560                         /* We only want to wake one co-routine per ISR, so check that a\r
2561                         co-routine has not already been woken. */\r
2562                         if( xCoRoutinePreviouslyWoken == pdFALSE )\r
2563                         {\r
2564                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )\r
2565                                 {\r
2566                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )\r
2567                                         {\r
2568                                                 return pdTRUE;\r
2569                                         }\r
2570                                         else\r
2571                                         {\r
2572                                                 mtCOVERAGE_TEST_MARKER();\r
2573                                         }\r
2574                                 }\r
2575                                 else\r
2576                                 {\r
2577                                         mtCOVERAGE_TEST_MARKER();\r
2578                                 }\r
2579                         }\r
2580                         else\r
2581                         {\r
2582                                 mtCOVERAGE_TEST_MARKER();\r
2583                         }\r
2584                 }\r
2585                 else\r
2586                 {\r
2587                         mtCOVERAGE_TEST_MARKER();\r
2588                 }\r
2589 \r
2590                 return xCoRoutinePreviouslyWoken;\r
2591         }\r
2592 \r
2593 #endif /* configUSE_CO_ROUTINES */\r
2594 /*-----------------------------------------------------------*/\r
2595 \r
2596 #if ( configUSE_CO_ROUTINES == 1 )\r
2597 \r
2598         BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )\r
2599         {\r
2600         BaseType_t xReturn;\r
2601         Queue_t * const pxQueue = xQueue;\r
2602 \r
2603                 /* We cannot block from an ISR, so check there is data available. If\r
2604                 not then just leave without doing anything. */\r
2605                 if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )\r
2606                 {\r
2607                         /* Copy the data from the queue. */\r
2608                         pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;\r
2609                         if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )\r
2610                         {\r
2611                                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;\r
2612                         }\r
2613                         else\r
2614                         {\r
2615                                 mtCOVERAGE_TEST_MARKER();\r
2616                         }\r
2617                         --( pxQueue->uxMessagesWaiting );\r
2618                         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );\r
2619 \r
2620                         if( ( *pxCoRoutineWoken ) == pdFALSE )\r
2621                         {\r
2622                                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )\r
2623                                 {\r
2624                                         if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )\r
2625                                         {\r
2626                                                 *pxCoRoutineWoken = pdTRUE;\r
2627                                         }\r
2628                                         else\r
2629                                         {\r
2630                                                 mtCOVERAGE_TEST_MARKER();\r
2631                                         }\r
2632                                 }\r
2633                                 else\r
2634                                 {\r
2635                                         mtCOVERAGE_TEST_MARKER();\r
2636                                 }\r
2637                         }\r
2638                         else\r
2639                         {\r
2640                                 mtCOVERAGE_TEST_MARKER();\r
2641                         }\r
2642 \r
2643                         xReturn = pdPASS;\r
2644                 }\r
2645                 else\r
2646                 {\r
2647                         xReturn = pdFAIL;\r
2648                 }\r
2649 \r
2650                 return xReturn;\r
2651         }\r
2652 \r
2653 #endif /* configUSE_CO_ROUTINES */\r
2654 /*-----------------------------------------------------------*/\r
2655 \r
2656 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2657 \r
2658         void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2659         {\r
2660         UBaseType_t ux;\r
2661 \r
2662                 /* See if there is an empty space in the registry.  A NULL name denotes\r
2663                 a free slot. */\r
2664                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2665                 {\r
2666                         if( xQueueRegistry[ ux ].pcQueueName == NULL )\r
2667                         {\r
2668                                 /* Store the information on this queue. */\r
2669                                 xQueueRegistry[ ux ].pcQueueName = pcQueueName;\r
2670                                 xQueueRegistry[ ux ].xHandle = xQueue;\r
2671 \r
2672                                 traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );\r
2673                                 break;\r
2674                         }\r
2675                         else\r
2676                         {\r
2677                                 mtCOVERAGE_TEST_MARKER();\r
2678                         }\r
2679                 }\r
2680         }\r
2681 \r
2682 #endif /* configQUEUE_REGISTRY_SIZE */\r
2683 /*-----------------------------------------------------------*/\r
2684 \r
2685 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2686 \r
2687         const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2688         {\r
2689         UBaseType_t ux;\r
2690         const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */\r
2691 \r
2692                 /* Note there is nothing here to protect against another task adding or\r
2693                 removing entries from the registry while it is being searched. */\r
2694                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2695                 {\r
2696                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2697                         {\r
2698                                 pcReturn = xQueueRegistry[ ux ].pcQueueName;\r
2699                                 break;\r
2700                         }\r
2701                         else\r
2702                         {\r
2703                                 mtCOVERAGE_TEST_MARKER();\r
2704                         }\r
2705                 }\r
2706 \r
2707                 return pcReturn;\r
2708         } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */\r
2709 \r
2710 #endif /* configQUEUE_REGISTRY_SIZE */\r
2711 /*-----------------------------------------------------------*/\r
2712 \r
2713 #if ( configQUEUE_REGISTRY_SIZE > 0 )\r
2714 \r
2715         void vQueueUnregisterQueue( QueueHandle_t xQueue )\r
2716         {\r
2717         UBaseType_t ux;\r
2718 \r
2719                 /* See if the handle of the queue being unregistered in actually in the\r
2720                 registry. */\r
2721                 for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )\r
2722                 {\r
2723                         if( xQueueRegistry[ ux ].xHandle == xQueue )\r
2724                         {\r
2725                                 /* Set the name to NULL to show that this slot if free again. */\r
2726                                 xQueueRegistry[ ux ].pcQueueName = NULL;\r
2727 \r
2728                                 /* Set the handle to NULL to ensure the same queue handle cannot\r
2729                                 appear in the registry twice if it is added, removed, then\r
2730                                 added again. */\r
2731                                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;\r
2732                                 break;\r
2733                         }\r
2734                         else\r
2735                         {\r
2736                                 mtCOVERAGE_TEST_MARKER();\r
2737                         }\r
2738                 }\r
2739 \r
2740         } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */\r
2741 \r
2742 #endif /* configQUEUE_REGISTRY_SIZE */\r
2743 /*-----------------------------------------------------------*/\r
2744 \r
2745 #if ( configUSE_TIMERS == 1 )\r
2746 \r
2747         void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )\r
2748         {\r
2749         Queue_t * const pxQueue = xQueue;\r
2750 \r
2751                 /* This function should not be called by application code hence the\r
2752                 'Restricted' in its name.  It is not part of the public API.  It is\r
2753                 designed for use by kernel code, and has special calling requirements.\r
2754                 It can result in vListInsert() being called on a list that can only\r
2755                 possibly ever have one item in it, so the list will be fast, but even\r
2756                 so it should be called with the scheduler locked and not from a critical\r
2757                 section. */\r
2758 \r
2759                 /* Only do anything if there are no messages in the queue.  This function\r
2760                 will not actually cause the task to block, just place it on a blocked\r
2761                 list.  It will not block until the scheduler is unlocked - at which\r
2762                 time a yield will be performed.  If an item is added to the queue while\r
2763                 the queue is locked, and the calling task blocks on the queue, then the\r
2764                 calling task will be immediately unblocked when the queue is unlocked. */\r
2765                 prvLockQueue( pxQueue );\r
2766                 if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )\r
2767                 {\r
2768                         /* There is nothing in the queue, block for the specified period. */\r
2769                         vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );\r
2770                 }\r
2771                 else\r
2772                 {\r
2773                         mtCOVERAGE_TEST_MARKER();\r
2774                 }\r
2775                 prvUnlockQueue( pxQueue );\r
2776         }\r
2777 \r
2778 #endif /* configUSE_TIMERS */\r
2779 /*-----------------------------------------------------------*/\r
2780 \r
2781 #if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )\r
2782 \r
2783         QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )\r
2784         {\r
2785         QueueSetHandle_t pxQueue;\r
2786 \r
2787                 pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );\r
2788 \r
2789                 return pxQueue;\r
2790         }\r
2791 \r
2792 #endif /* configUSE_QUEUE_SETS */\r
2793 /*-----------------------------------------------------------*/\r
2794 \r
2795 #if ( configUSE_QUEUE_SETS == 1 )\r
2796 \r
2797         BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2798         {\r
2799         BaseType_t xReturn;\r
2800 \r
2801                 taskENTER_CRITICAL();\r
2802                 {\r
2803                         if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )\r
2804                         {\r
2805                                 /* Cannot add a queue/semaphore to more than one queue set. */\r
2806                                 xReturn = pdFAIL;\r
2807                         }\r
2808                         else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2809                         {\r
2810                                 /* Cannot add a queue/semaphore to a queue set if there are already\r
2811                                 items in the queue/semaphore. */\r
2812                                 xReturn = pdFAIL;\r
2813                         }\r
2814                         else\r
2815                         {\r
2816                                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;\r
2817                                 xReturn = pdPASS;\r
2818                         }\r
2819                 }\r
2820                 taskEXIT_CRITICAL();\r
2821 \r
2822                 return xReturn;\r
2823         }\r
2824 \r
2825 #endif /* configUSE_QUEUE_SETS */\r
2826 /*-----------------------------------------------------------*/\r
2827 \r
2828 #if ( configUSE_QUEUE_SETS == 1 )\r
2829 \r
2830         BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )\r
2831         {\r
2832         BaseType_t xReturn;\r
2833         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;\r
2834 \r
2835                 if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )\r
2836                 {\r
2837                         /* The queue was not a member of the set. */\r
2838                         xReturn = pdFAIL;\r
2839                 }\r
2840                 else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )\r
2841                 {\r
2842                         /* It is dangerous to remove a queue from a set when the queue is\r
2843                         not empty because the queue set will still hold pending events for\r
2844                         the queue. */\r
2845                         xReturn = pdFAIL;\r
2846                 }\r
2847                 else\r
2848                 {\r
2849                         taskENTER_CRITICAL();\r
2850                         {\r
2851                                 /* The queue is no longer contained in the set. */\r
2852                                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;\r
2853                         }\r
2854                         taskEXIT_CRITICAL();\r
2855                         xReturn = pdPASS;\r
2856                 }\r
2857 \r
2858                 return xReturn;\r
2859         } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */\r
2860 \r
2861 #endif /* configUSE_QUEUE_SETS */\r
2862 /*-----------------------------------------------------------*/\r
2863 \r
2864 #if ( configUSE_QUEUE_SETS == 1 )\r
2865 \r
2866         QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )\r
2867         {\r
2868         QueueSetMemberHandle_t xReturn = NULL;\r
2869 \r
2870                 ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2871                 return xReturn;\r
2872         }\r
2873 \r
2874 #endif /* configUSE_QUEUE_SETS */\r
2875 /*-----------------------------------------------------------*/\r
2876 \r
2877 #if ( configUSE_QUEUE_SETS == 1 )\r
2878 \r
2879         QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )\r
2880         {\r
2881         QueueSetMemberHandle_t xReturn = NULL;\r
2882 \r
2883                 ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */\r
2884                 return xReturn;\r
2885         }\r
2886 \r
2887 #endif /* configUSE_QUEUE_SETS */\r
2888 /*-----------------------------------------------------------*/\r
2889 \r
2890 #if ( configUSE_QUEUE_SETS == 1 )\r
2891 \r
2892         static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )\r
2893         {\r
2894         Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;\r
2895         BaseType_t xReturn = pdFALSE;\r
2896 \r
2897                 /* This function must be called form a critical section. */\r
2898 \r
2899                 configASSERT( pxQueueSetContainer );\r
2900                 configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );\r
2901 \r
2902                 if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )\r
2903                 {\r
2904                         const int8_t cTxLock = pxQueueSetContainer->cTxLock;\r
2905 \r
2906                         traceQUEUE_SET_SEND( pxQueueSetContainer );\r
2907 \r
2908                         /* The data copied is the handle of the queue that contains data. */\r
2909                         xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );\r
2910 \r
2911                         if( cTxLock == queueUNLOCKED )\r
2912                         {\r
2913                                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )\r
2914                                 {\r
2915                                         if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )\r
2916                                         {\r
2917                                                 /* The task waiting has a higher priority. */\r
2918                                                 xReturn = pdTRUE;\r
2919                                         }\r
2920                                         else\r
2921                                         {\r
2922                                                 mtCOVERAGE_TEST_MARKER();\r
2923                                         }\r
2924                                 }\r
2925                                 else\r
2926                                 {\r
2927                                         mtCOVERAGE_TEST_MARKER();\r
2928                                 }\r
2929                         }\r
2930                         else\r
2931                         {\r
2932                                 configASSERT( cTxLock != queueINT8_MAX);\r
2933 \r
2934                                 pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );\r
2935                         }\r
2936                 }\r
2937                 else\r
2938                 {\r
2939                         mtCOVERAGE_TEST_MARKER();\r
2940                 }\r
2941 \r
2942                 return xReturn;\r
2943         }\r
2944 \r
2945 #endif /* configUSE_QUEUE_SETS */\r
2946 \r
2947 \r
2948 \r
2949 \r
2950 \r
2951 \r
2952 \r
2953 \r
2954 \r
2955 \r
2956 \r
2957 \r