 * Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
#include <threading.h>
/*@#include "common.gh"@*/

typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;

/* Empty/no-op macros */

#define traceBLOCKING_ON_QUEUE_PEEK( x )
#define traceBLOCKING_ON_QUEUE_RECEIVE( x )
#define traceBLOCKING_ON_QUEUE_SEND( x )
#define traceQUEUE_CREATE( x )
#define traceQUEUE_CREATE_FAILED( x )
#define traceQUEUE_DELETE( x )
#define traceQUEUE_PEEK( x )
#define traceQUEUE_PEEK_FAILED( x )
#define traceQUEUE_PEEK_FROM_ISR( x )
#define traceQUEUE_PEEK_FROM_ISR_FAILED( x )
#define traceQUEUE_RECEIVE( x )
#define traceQUEUE_RECEIVE_FAILED( x )
#define traceQUEUE_RECEIVE_FROM_ISR( x )
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( x )
#define traceQUEUE_SEND( x )
#define traceQUEUE_SEND_FAILED( x )
#define traceQUEUE_SEND_FROM_ISR( x )
#define traceQUEUE_SEND_FROM_ISR_FAILED( x )

#define mtCOVERAGE_TEST_MARKER()

#define configASSERT( x )
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
/* Map portable memory management functions */
#define pvPortMalloc    malloc
#define vPortFree       free

#define queueSEND_TO_BACK     ( ( BaseType_t ) 0 )
#define queueSEND_TO_FRONT    ( ( BaseType_t ) 1 )
#define queueOVERWRITE        ( ( BaseType_t ) 2 )

#define pdFAIL            pdFALSE
#define errQUEUE_FULL     0
#define errQUEUE_EMPTY    0

/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )
typedef struct QueuePointers
    int8_t * pcTail;     /*< Points to the byte at the end of the queue storage area. One more byte is allocated than necessary to store the queue items, and this extra byte is used as a marker. */
    int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */

typedef struct SemaphoreData
#ifdef VERIFAST /*< do not model xMutexHolder */
    TaskHandle_t xMutexHolder;        /*< The handle of the task that holds the mutex. */
    UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
/* VeriFast does not support unions, so we replace the union with a struct */
    QueuePointers_t xQueue;
    SemaphoreData_t xSemaphore;

    UBaseType_t uxNumberOfItems;
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
    struct xLIST_ITEM * pxIndex;
    MiniListItem_t xListEnd;
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
    int8_t * pcHead;    /*< Points to the beginning of the queue storage area. */
    int8_t * pcWriteTo; /*< Points to the next free place in the storage area. */

#ifdef VERIFAST /*< VeriFast does not model unions */
    struct fake_union_t u;

    QueuePointers_t xQueue;     /*< Data required exclusively when this structure is used as a queue. */
    SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */

    List_t xTasksWaitingToSend;    /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
    List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
    UBaseType_t uxLength;                   /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /*< The size of each item that the queue will hold. */

    volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
    volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
    uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */

#if ( configUSE_QUEUE_SETS == 1 )
    struct QueueDefinition * pxQueueSetContainer;

#if ( configUSE_TRACE_FACILITY == 1 )
    UBaseType_t uxQueueNumber;

    /*@struct mutex *irqMask;@*/          /*< Ghost mutex simulates the effect of irq masking */
    /*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */
    /*@struct mutex *locked;@*/           /*< Ghost mutex simulates the effect of queue locking */

typedef xQUEUE Queue_t;

typedef struct QueueDefinition * QueueHandle_t;
#define QUEUE_SHAPE(q, Storage, N, M, K) \
 * malloc_block_QueueDefinition(q) &*& \
 * q->pcHead |-> Storage &*& \
 * q->pcWriteTo |-> ?WPtr &*& \
 * q->u.xQueue.pcTail |-> ?End &*& \
 * q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
 * q->uxItemSize |-> M &*& \
 * q->uxLength |-> N &*& \
 * q->uxMessagesWaiting |-> K &*& \
 * q->cRxLock |-> ?rxLock &*& \
 * q->cTxLock |-> ?txLock &*& \
 * struct_QueuePointers_padding(&q->u.xQueue) &*& \
 * struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
 * struct_fake_union_t_padding(&q->u) &*& \
 * struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
 * struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
 * q->u.xSemaphore.xMutexHolder |-> _ &*& \
 * q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
 * predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> > abs) =
 * QUEUE_SHAPE(q, Storage, N, M, K) &*&
 * 0 <= W &*& W < N &*&
 * 0 <= R &*& R < N &*&
 * 0 <= K &*& K <= N &*&
 * W == (R + 1 + K) % N &*&
 * (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
 * (is_locked ? 0 <= txLock : (-1) == txLock) &*&
 * WPtr == Storage + (W*M) &*&
 * RPtr == Storage + (R*M) &*&
 * End == Storage + (N*M) &*&
 * buffer(Storage, N, M, ?contents) &*&
 * length(contents) == N &*&
 * abs == take(K, rotate_left((R+1)%N, contents)) &*&
 * malloc_block(Storage, N*M) &*&
/* A buffer allows us to interpret a flat character array of `N*M` bytes as a
 * list of `N` elements where each element is `M` bytes */
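/* Illustrative example (not part of the original proofs; the helper and its names
 * are hypothetical): with N == 2 and M == 3, a storage area holding bytes b0..b5
 * is read as the element list [[b0,b1,b2], [b3,b4,b5]]. In plain C terms, element
 * i starts at byte offset i*M:
 *
 *     void vCopyOutElement( const char * pcStorage,
 *                           size_t uxItemSize,
 *                           size_t uxIndex,
 *                           char * pcDest )
 *     {
 *         size_t i;
 *         for( i = 0; i < uxItemSize; i++ )
 *         {
 *             pcDest[ i ] = pcStorage[ ( uxIndex * uxItemSize ) + i ];
 *         }
 *     }
 */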
 * predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
 * : chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
 * lemma void buffer_length(char *buffer, size_t N, size_t M)
 * requires buffer(buffer, N, M, ?elements);
 * ensures buffer(buffer, N, M, elements) &*& length(elements) == N;
 * open buffer(buffer, N, M, elements);
 * close buffer(buffer, N, M, elements);
 * open buffer(buffer, N, M, elements);
 * buffer_length(buffer+M, N-1, M);
 * There is no need in the queue proofs to preserve a relationship between `cs`
 * and `elements` (i.e., `flatten(elements) == cs`) because we only move in one
 * direction, from `cs` to `elements`, during queue creation, when the contents are
 * fresh from `malloc` (i.e., uninitialized). If we needed to do a roundtrip from
 * `elements` back to `cs` then this would require a stronger lemma.
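/* Sketch of the one-way direction described above (illustrative only; the
 * variable names are hypothetical and uxItemSize is assumed to be nonzero so the
 * precondition 0 < M of the lemma below is met):
 *
 *     int8_t * pcStorage = ( int8_t * ) pvPortMalloc( uxLength * uxItemSize );
 *     //@ buffer_from_chars( ( char * ) pcStorage, uxLength, uxItemSize );
 *
 * After the ghost lemma call the fresh, uninitialized bytes are viewed as a list
 * of uxLength elements of uxItemSize bytes each. */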
 * lemma void buffer_from_chars(char *buffer, size_t N, size_t M)
 * requires chars(buffer, N*M, ?cs) &*& 0 <= N &*& 0 < M;
 * ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
 * invariant 0 <= i &*& i <= N &*&
 * chars(buffer, (N-i)*M, ?xs) &*& xs == take((N-i)*M, cs) &*&
 * buffer(buffer + (N-i)*M, i, M, ?ys);
 * mul_mono_l(0, N-i-1, M);
 * chars_split(buffer, (N-i-1)*M);
 * mul_mono_l(i, N, M);
 * mul_mono_l(N-i, N, M);
 * take_take((N-i-1)*M, (N-i)*M, cs);
 * buffer_length(buffer, N, M);
 * lemma void append_buffer(char *buffer, size_t N1, size_t N2, size_t M)
 * buffer(buffer, N1, M, ?elements1) &*&
 * buffer(buffer + N1 * M, N2, M, ?elements2) &*&
 * 0 <= N1 &*& 0 <= N2;
 * ensures buffer(buffer, N1+N2, M, append(elements1, elements2));
 * open buffer(buffer, 0, M, _);
 * } else if (N2 == 0) {
 * open buffer(buffer + N1 * M, 0, M, _);
 * open buffer(buffer, N1, M, elements1);
 * append_buffer(buffer + M, N1-1, N2, M);
 * close buffer(buffer, N1+N2, M, cons(?x, append(xs, elements2)));
 * lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i)
 * requires buffer(buffer, N, M, ?elements) &*& 0 <= i &*& i < N;
 * buffer(buffer, i, M, take(i, elements)) &*&
 * chars(buffer + i * M, M, nth(i, elements)) &*&
 * buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
 * buffer_length(buffer, N, M);
 * invariant 0 <= j &*& j <= i &*&
 * buffer(buffer, j, M, take(j, elements)) &*&
 * buffer(buffer + j * M, N-j, M, drop(j, elements));
 * drop_drop(1, j, elements);
 * nth_drop2(elements, j);
 * open buffer(buffer + j * M, N-j, M, drop(j, elements));
 * assert chars(buffer + j * M, M, ?x) &*& x == nth(j, elements);
 * close buffer(buffer + j * M, 1, M, singleton(x));
 * append_buffer(buffer, j, 1, M);
 * take_plus_one(j, elements);
 * drop_drop(1, j, elements);
 * nth_drop2(elements, i);
 * lemma void join_element(char *buffer, size_t N, size_t M, size_t i)
 * 0 <= i &*& i < N &*&
 * buffer(buffer, i, M, ?prefix) &*&
 * chars(buffer + i * M, M, ?element) &*&
 * buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
 * ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
 * open buffer(buffer, i, M, prefix);
 * assert prefix == nil;
 * close buffer(buffer, N, M, cons(element, suffix));
 * close buffer(buffer + i * M, N-i, M, cons(element, suffix));
 * append_buffer(buffer, i, N-i, M);
 * predicate list(List_t *l;) =
 * l->uxNumberOfItems |-> _;
 * predicate queuelists(QueueHandle_t q;) =
 * list(&q->xTasksWaitingToSend) &*&
 * list(&q->xTasksWaitingToReceive);
/* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K), the
 * queue predicate above does not hold as a postcondition. If the caller
 * subsequently decrements K then the queue predicate can be reinstated. */
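/* Illustrative caller pattern (a sketch, not taken verbatim from queue.c): the
 * caller snapshots the message count, copies the data out, and then restores the
 * full queue predicate by writing back the decremented count, e.g.
 *
 *     const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
 *     prvCopyDataFromQueue( pxQueue, pvBuffer );
 *     pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
 *
 * which is the shape of the receive paths that use this intermediate predicate. */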
 * predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> > abs) =
 * QUEUE_SHAPE(q, Storage, N, M, K) &*&
 * 0 <= W &*& W < N &*&
 * 0 <= R &*& R < N &*&
 * 0 <= K &*& K <= N &*&
 * W == (R + K) % N &*& //< Differs from queue predicate
 * (is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
 * (is_locked ? 0 <= txLock : (-1) == txLock) &*&
 * WPtr == Storage + (W*M) &*&
 * RPtr == Storage + (R*M) &*&
 * End == Storage + (N*M) &*&
 * buffer(Storage, N, M, ?contents) &*&
 * length(contents) == N &*&
 * abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
 * malloc_block(Storage, N*M) &*&
/* This predicate can't be called `mutex` because that name clashes with VeriFast's built-in predicate */
 * predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
 * QUEUE_SHAPE(q, Storage, N, 0, K) &*&
 * 0 <= K &*& K <= N &*&
 * WPtr == Storage &*&
 * RPtr == Storage &*&
 * malloc_block(Storage, 0) &*&
 * chars(Storage, 0, _) &*&
/* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
 * `irqMask` gives access to the core queue resources. The permissions granted
 * after masking interrupts depend on the caller:
 * - A task has access to the queue and the queuelists
 * - An ISR has access to the queue and, if the queue is unlocked, the queuelists */
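/* Illustrative task-side sketch (not from the original proofs): between
 * taskENTER_CRITICAL() and taskEXIT_CRITICAL() (defined below in terms of
 * setInterruptMask/clearInterruptMask) a task holds the ghost `irqMask` mutex and
 * may therefore use both the queue and the queuelists resources:
 *
 *     taskENTER_CRITICAL();
 *     {
 *         // queue(xQueue, ...) and queuelists(xQueue) are available here
 *     }
 *     taskEXIT_CRITICAL();
 */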
 * predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
 * q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
 * predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
 * queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
 * (is_isr && is_locked ? true : queuelists(queue));
/* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
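/* Illustrative task-side sketch (not from the original proofs): suspending the
 * scheduler hands the task the queue's `locked` ghost mutex, per the
 * vTaskSuspendAll/xTaskResumeAll contracts below:
 *
 *     vTaskSuspendAll();
 *     {
 *         // xQueue->locked |-> ?m2 &*& mutex(m2, queue_locked_invariant(xQueue))
 *     }
 *     ( void ) xTaskResumeAll();
 */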
 * predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
 * queue->locked |-> ?m &*&
 * mutex(m, queue_locked_invariant(queue));
 * predicate queuesuspend(QueueHandle_t q;) =
 * q->schedulerSuspend |-> ?m &*&
 * mutex(m, scheduler_suspended_invariant(q));
/* A queuelock is exclusively acquired by a task. Acquiring the ghost `locked` mutex gives access to the queue list resources. */
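/* Illustrative task-side sketch (not from the original proofs): prvLockQueue() and
 * prvUnlockQueue() (contracts below) bracket a region in which the task holds the
 * ghost `locked` mutex and therefore the resources described by
 * queue_locked_invariant:
 *
 *     prvLockQueue( xQueue );
 *     {
 *         // queue_locked_invariant(xQueue)() is available here
 *     }
 *     prvUnlockQueue( xQueue );
 */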
 * predicate queuelock(QueueHandle_t q;) =
 * q->locked |-> ?m &*&
 * mutex(m, queue_locked_invariant(q));
 * predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =

BaseType_t vListInitialise( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/

BaseType_t listLIST_IS_EMPTY( List_t * list );
/*@requires list->uxNumberOfItems |-> ?len;@*/
/*@ensures list->uxNumberOfItems |-> len &*& result == (len == 0 ? pdTRUE : pdFALSE);@*/

typedef struct xTIME_OUT
    BaseType_t xOverflowCount;
    TickType_t xTimeOnEntering;

 * predicate xTIME_OUT(struct xTIME_OUT *to;) =
 * to->xOverflowCount |-> _ &*&
 * to->xTimeOnEntering |-> _ &*&
 * struct_xTIME_OUT_padding(to);

void vTaskInternalSetTimeOutState( TimeOut_t * x );
/*@requires xTIME_OUT(x);@*/
/*@ensures xTIME_OUT(x);@*/

BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait );
/*@requires xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
/*@ensures xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/

BaseType_t xTaskRemoveFromEventList( List_t * list );
/*@requires list(list);@*/
/*@ensures list(list);@*/

void vTaskPlaceOnEventList( List_t * const pxEventList,
                            const TickType_t xTicksToWait );
/*@requires list(pxEventList);@*/
/*@ensures list(pxEventList);@*/
void vTaskMissedYield( void );

void vTaskSuspendAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*&
 * [1/2]xQueue->schedulerSuspend |-> ?m &*&
 * [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
 * mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
 * xQueue->locked |-> ?m2 &*&
 * mutex(m2, queue_locked_invariant(xQueue));@*/

BaseType_t xTaskResumeAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*&
 * [1/2]xQueue->schedulerSuspend |-> ?m &*&
 * mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
 * xQueue->locked |-> ?m2 &*&
 * mutex(m2, queue_locked_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
 * [1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/

void prvLockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
 * [1/2]queuelock(xQueue); @*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
 * [1/2]xQueue->locked |-> ?m &*&
 * mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
 * queue_locked_invariant(xQueue)();@*/

void prvUnlockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
 * [1/2]xQueue->locked |-> ?m &*&
 * mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
 * queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
 * [1/2]queuelock(xQueue);@*/
void setInterruptMask( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
 * mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
 * queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
 * queuelists(xQueue);@*/
    /*@open queuehandle(xQueue, N, M, is_isr);@*/
    mutex_acquire( xQueue->irqMask );
    /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/

void clearInterruptMask( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
 * [1/2]xQueue->irqMask |-> ?m &*&
 * mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
 * queuelists(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
    /*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
    mutex_release( xQueue->irqMask );
    /*@close [1/2]queuehandle(xQueue, N, M, false);@*/

#define taskENTER_CRITICAL()    setInterruptMask( xQueue )
#define taskEXIT_CRITICAL()     clearInterruptMask( xQueue )
#define portYIELD_WITHIN_API()
#define queueYIELD_IF_USING_PREEMPTION()
UBaseType_t setInterruptMaskFromISR( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
 * mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
 * queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
 * (is_locked ? true : queuelists(xQueue));@*/
    /*@open queuehandle(xQueue, N, M, is_isr);@*/
    mutex_acquire( xQueue->irqMask );
    /*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/

void clearInterruptMaskFromISR( QueueHandle_t xQueue,
                                UBaseType_t uxSavedInterruptStatus )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
 * [1/2]xQueue->irqMask |-> ?m &*&
 * mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
 * (is_locked ? true : queuelists(xQueue));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
    /*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
    mutex_release( xQueue->irqMask );
    /*@close [1/2]queuehandle(xQueue, N, M, true);@*/

#define portSET_INTERRUPT_MASK_FROM_ISR()                               setInterruptMaskFromISR( xQueue )
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus )    clearInterruptMaskFromISR( xQueue, uxSavedInterruptStatus )
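/* Illustrative ISR-side sketch (not from the original proofs): an ISR saves and
 * restores the interrupt mask around its queue access; while the mask is held it
 * may use the queue, and the queuelists only if the queue is not locked:
 *
 *     UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
 *     {
 *         // queue(xQueue, ...) is available; queuelists(xQueue) only if !is_locked
 *     }
 *     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
 */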