Test/VeriFast/queue/xQueueGenericSend.c
/*
 * FreeRTOS V202111.00
 * Copyright (C) Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

#include "proof/queue.h"
#include "proof/queuecontracts.h"

BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition )

/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
 *  [1/2]queuesuspend(xQueue) &*&
 *  chars(pvItemToQueue, M, ?x) &*&
 *  (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/

/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
 *  [1/2]queuesuspend(xQueue) &*&
 *  chars(pvItemToQueue, M, x);@*/
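/* Note on the contract above (one reading): the caller presumably holds half
 * of the queuehandle and queuesuspend chunks (fractional permissions shared
 * with other tasks), full ownership of the M-byte item buffer pvItemToQueue
 * with contents x, and a copy position that is either send-to-back,
 * send-to-front, or overwrite on a single-element queue.  On return the same
 * fractions and the unchanged item buffer are handed back; is_isr == false
 * restricts this contract to task (non-ISR) context. */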
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;

    #ifdef VERIFAST /*< const pointer declaration */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;

        configASSERT( pxQueue );
        configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
        configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
        #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
            {
                configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
            }
        #endif
    #endif /* ifdef VERIFAST */

    /*lint -save -e904 This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )

    /*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
     *  [1/2]queuesuspend(xQueue) &*&
     *  chars(pvItemToQueue, M, x) &*&
     *  u_integer(&xTicksToWait, _) &*&
     *  (xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
     *  xTIME_OUT(&xTimeOut);@*/
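    /* Note: one reading of the loop invariant above is that every blocking
     * retry re-enters the loop with the two fractional chunks from the
     * precondition, the caller's item buffer still holding x, ownership of
     * the local xTicksToWait variable (it is updated by xTaskCheckForTimeOut),
     * the xCopyPosition constraint, and ownership of the xTimeOut structure. */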
    {
        taskENTER_CRITICAL();
        {
            /*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
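            /* Note: the assert above binds the components of the queue
             * predicate (presumably made available by taskENTER_CRITICAL())
             * -- storage buffer, capacity N, item size M, write index W,
             * read index R, current length K, lock flag and abstract item
             * list abs -- for use in the ghost steps below. */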

            /* Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full. */
            if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
            {
                traceQUEUE_SEND( pxQueue );

                /* VeriFast: we do not verify this configuration option */
                #if ( configUSE_QUEUE_SETS == 1 )
                    {
                        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        if( pxQueue->pxQueueSetContainer != NULL )
                        {
                            if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
                            {
                                /* Do not notify the queue set as an existing item
                                 * was overwritten in the queue so the number of items
                                 * in the queue has not changed. */
                                mtCOVERAGE_TEST_MARKER();
                            }
                            else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                            {
                                /* The queue is a member of a queue set, and posting
                                 * to the queue set caused a higher priority task to
                                 * unblock. A context switch is required. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            /* If there was a task waiting for data to arrive on the
                             * queue then unblock it now. */
                            if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                            {
                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                                {
                                    /* The unblocked task has a priority higher than
                                     * our own so yield immediately.  Yes it is ok to
                                     * do this from within the critical section - the
                                     * kernel takes care of that. */
                                    queueYIELD_IF_USING_PREEMPTION();
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else if( xYieldRequired != pdFALSE )
                            {
                                /* This path is a special case that will only get
                                 * executed if the task was holding multiple mutexes
                                 * and the mutexes were given back in an order that is
                                 * different to that in which they were taken. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                    }
                #else /* configUSE_QUEUE_SETS */
                    {
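                        /* Note: the ghost close below presumably repackages
                         * the queue fields into the queue predicate so that
                         * the precondition of prvCopyDataToQueue (whose
                         * contract presumably lives in proof/queuecontracts.h)
                         * can be satisfied. */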
                        /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
                        xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

                        /* If there was a task waiting for data to arrive on the
                         * queue then unblock it now. */
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to do
                                 * this from within the critical section - the kernel
                                 * takes care of that. */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else if( xYieldRequired != pdFALSE )
                        {
                            /* This path is a special case that will only get
                             * executed if the task was holding multiple mutexes and
                             * the mutexes were given back in an order that is
                             * different to that in which they were taken. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                #endif /* configUSE_QUEUE_SETS */

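                /* Note: the ghost code below closes the queue predicate with
                 * the updated abstract state -- send-to-back advances the
                 * write index modulo N and appends x, send-to-front steps the
                 * read index back (wrapping to N-1) and conses x onto the
                 * front, and overwrite leaves both indices alone and replaces
                 * the contents with singleton(x), which is only possible
                 * because the precondition forces N == 1 in that case. */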
                /*@
                 * if (xCopyPosition == queueSEND_TO_BACK)
                 * {
                 *  close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
                 * }
                 * else if (xCopyPosition == queueSEND_TO_FRONT)
                 * {
                 *  close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
                 * }
                 * else if (xCopyPosition == queueOVERWRITE)
                 * {
                 *  close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
                 * }
                 * @*/
                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/

                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function. */
                    traceQUEUE_SEND_FAILED( pxQueue );
                    return errQUEUE_FULL;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

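        /* Note: the ghost close of exists(pxQueue) below presumably tells the
         * contracts of vTaskSuspendAll()/xTaskResumeAll() which queue is
         * involved, while prvLockQueue() moves the queue's resources into
         * queue_locked_invariant so they can be opened and closed around the
         * event-list operations further down. */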
        /*@close exists(pxQueue);@*/
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            if( prvIsQueueFull( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_SEND( pxQueue );
                /*@open queue_locked_invariant(xQueue)();@*/
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Unlocking the queue means queue events can affect the
                 * event list.  It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list. */
                /*@close queue_locked_invariant(xQueue)();@*/
                prvUnlockQueue( pxQueue );

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in a ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                /*@close exists(pxQueue);@*/
                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Try again. */
                prvUnlockQueue( pxQueue );
                #ifdef VERIFAST /*< void cast of unused return value */
                    /*@close exists(pxQueue);@*/
                    xTaskResumeAll();
                #else
                    ( void ) xTaskResumeAll();
                #endif
            }
        }
        else
        {
            /* The timeout has expired. */
            prvUnlockQueue( pxQueue );
            #ifdef VERIFAST /*< void cast of unused return value */
                /*@close exists(pxQueue);@*/
                xTaskResumeAll();
            #else
                ( void ) xTaskResumeAll();
            #endif

            traceQUEUE_SEND_FAILED( pxQueue );
            return errQUEUE_FULL;
        }
    } /*lint -restore */
}
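
/* Usage sketch (illustrative only): application code normally reaches
 * xQueueGenericSend through the xQueueSend, xQueueSendToBack, xQueueSendToFront
 * and xQueueOverwrite macros, which fix the xCopyPosition argument.  Assuming a
 * queue of uint32_t items whose handle xMyQueue was created elsewhere:
 *
 *     uint32_t ulValue = 42u;
 *
 *     if( xQueueSend( xMyQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *     {
 *         // The queue stayed full for 100 ms; the call returned errQUEUE_FULL.
 *     }
 *
 * xQueueSend() expands to
 * xQueueGenericSend( xMyQueue, &ulValue, pdMS_TO_TICKS( 100 ), queueSEND_TO_BACK ). */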