/*******************************************************************************
 * Tracealyzer v2.5.0 Recorder Library
 * Percepio AB, www.percepio.com
 *
 * Kernel-specific functionality for FreeRTOS, used by the recorder library.
 *
 * This software is copyright Percepio AB. The recorder library is free for
 * use together with Percepio products. You may distribute the recorder library
 * in its original form, including modifications in trcHardwarePort.c/.h,
 * given that these modifications are clearly marked as your own modifications
 * and documented in the initial comment section of these source files.
 * This software is the intellectual property of Percepio AB and may not be
 * sold or in other ways commercially redistributed without explicit written
 * permission by Percepio AB.
 *
 * The trace tool and recorder library are delivered to you AS IS and
 * Percepio AB makes no warranty as to their use or performance. Percepio AB
 * does not and cannot warrant the performance or results you may obtain by
 * using the software or documentation. Percepio AB makes no warranties,
 * express or implied, as to noninfringement of third-party rights,
 * merchantability, or fitness for any particular purpose. In no event will
 * Percepio AB, its technology partners, or distributors be liable to you for
 * any consequential, incidental or special damages, including any lost profits
 * or lost savings, even if a representative of Percepio AB has been advised
 * of the possibility of such damages, or for any claim by any third party.
 * Some jurisdictions do not allow the exclusion or limitation of incidental,
 * consequential or special damages, or the exclusion of implied warranties or
 * limitations on how long an implied warranty may last, so the above
 * limitations may not apply to you.
 *
 * Copyright Percepio AB, 2013.
 ******************************************************************************/

#ifndef TRCKERNELPORT_H_
#define TRCKERNELPORT_H_

#include "FreeRTOS.h" /* Defines configUSE_TRACE_FACILITY */

#define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY

#if (USE_TRACEALYZER_RECORDER == 1)

/* Defines that must be set for the recorder to work properly */
#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */

/************************************************************************/
/* KERNEL SPECIFIC OBJECT CONFIGURATION                                 */
/************************************************************************/
#define TRACE_NCLASSES 5
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)

#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR)

/* The size of the Object Property Table entries, in bytes, per object */

/* Queue properties (except name): current number of messages in the queue */
#define PropertyTableSizeQueue (NameLenQueue + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore (NameLenSemaphore + 1)

/* Mutex properties (except name): owner (task handle, 0 = free) */
#define PropertyTableSizeMutex (NameLenMutex + 1)

/* Task properties (except name): Byte 0: current priority
                                  Byte 1: state (if already active)
                                  Byte 2: legacy, not used
                                  Byte 3: legacy, not used */
#define PropertyTableSizeTask (NameLenTask + 4)

/* ISR properties:                Byte 0: priority
                                  Byte 1: state (if already active) */
#define PropertyTableSizeISR (NameLenISR + 2)

/* The layout of the byte array representing the Object Property Table */
#define StartIndexQueue     0
#define StartIndexSemaphore (StartIndexQueue + NQueue * PropertyTableSizeQueue)
#define StartIndexMutex     (StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore)
#define StartIndexTask      (StartIndexMutex + NMutex * PropertyTableSizeMutex)
#define StartIndexISR       (StartIndexTask + NTask * PropertyTableSizeTask)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexISR + NISR * PropertyTableSizeISR)

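/* A worked sizing example (illustrative only; NQueue, NameLenQueue, etc. are
   set in trcConfig.h and the values below are assumptions, not defaults):
   with NQueue = 10, NSemaphore = 10, NMutex = 5, NTask = 10, NISR = 4 and all
   name length settings at 15, the table layout becomes:

       StartIndexSemaphore     = 0   + 10 * (15 + 1) = 160
       StartIndexMutex         = 160 + 10 * (15 + 1) = 320
       StartIndexTask          = 320 +  5 * (15 + 1) = 400
       StartIndexISR           = 400 + 10 * (15 + 4) = 590
       TRACE_OBJECT_TABLE_SIZE = 590 +  4 * (15 + 2) = 658 bytes */
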
#include "trcTypes.h"
#include "trcConfig.h"
#include "trcKernelHooks.h"
#include "trcHardwarePort.h"
#include "trcBase.h"
#include "trcKernel.h"
#include "trcUser.h"

#if (INCLUDE_NEW_TIME_EVENTS == 1 && configUSE_TICKLESS_IDLE != 0)
#error "NewTime events can not be used in combination with tickless idle!"
#endif

/* Initialization of the object property table */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g., xTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object class */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);

/*******************************************************************************
 * The event codes - should match the offline config file.
 *
 * Some sections below are encoded to allow for constructions like:
 *
 *     vTraceStoreKernelCall(EVENTGROUP_CREATE + objectclass, ...)
 *
 * In such cases, the object class ID is given by the three least-significant
 * bits. Since each object class has a separate object property table, the
 * class ID is needed to know what section in the object table to use for
 * getting an object name from an object handle.
 ******************************************************************************/

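/* For example (illustrative arithmetic): a successful semaphore creation is
   stored as EVENTGROUP_CREATE_SUCCESS + TRACE_CLASS_SEMAPHORE = 0x18 + 1 =
   0x19, so the host tool recovers both the operation and the object class
   from a single event code byte. */
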
#define NULL_EVENT (0x00) /* Ignored in the analysis */

/*******************************************************************************
 * Miscellaneous events
 ******************************************************************************/
#define EVENTGROUP_DIV (NULL_EVENT + 1)     /*0x01*/
#define DIV_XPS (EVENTGROUP_DIV + 0)        /*0x01*/
#define DIV_TASK_READY (EVENTGROUP_DIV + 1) /*0x02*/
#define DIV_NEW_TIME (EVENTGROUP_DIV + 2)   /*0x03*/

/*******************************************************************************
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS (EVENTGROUP_DIV + 3)  /*0x04*/
#define TS_ISR_BEGIN (EVENTGROUP_TS + 0)    /*0x04*/
#define TS_ISR_RESUME (EVENTGROUP_TS + 1)   /*0x05*/
#define TS_TASK_BEGIN (EVENTGROUP_TS + 2)   /*0x06*/
#define TS_TASK_RESUME (EVENTGROUP_TS + 3)  /*0x07*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and the object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME (EVENTGROUP_TS + 4) /*0x08*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying the properties of deleted objects.
 * The handle and object class of the closed object are not stored in this
 * event, but are assumed to be the same as in the preceding CLOSE event.
 * Thus, these two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., the number of
 * buffered messages/signals).
 * When actors are closed, arg1 is the priority, arg2 is the handle of the
 * "instance finish" event, and arg3 is the event code of the "instance finish"
 * event. In this case, the lower three bits give the object class of the
 * instance finish handle. The lower three bits are not used (always zero)
 * when queues are closed, since the queue type is given in the previous
 * OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP (EVENTGROUP_OBJCLOSE_NAME + 8) /*0x10*/

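/* Sketch of the resulting sequence when a queue is evicted (illustrative):
   within one critical section, the recorder stores an OBJCLOSE_NAME event
   holding the handle-to-name mapping, immediately followed by an
   OBJCLOSE_PROP event whose arg1 holds the number of messages still buffered
   at close time. */
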
/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log kernel object creations.
 * The lower three bits in the event code give the object class, i.e., the type
 * of create operation (task, queue, semaphore, etc.).
 ******************************************************************************/
#define EVENTGROUP_CREATE_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/

/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_SUCCESS + 8) /*0x20*/

/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_SUCCESS (EVENTGROUP_SEND_SUCCESS + 8) /*0x28*/

/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_SUCCESS (EVENTGROUP_RECEIVE_SUCCESS + 8) /*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS (EVENTGROUP_SEND_FROM_ISR_SUCCESS + 8) /*0x38*/

/* "Failed" event type versions of the above (timeout, failed allocation, etc.) */
#define EVENTGROUP_KSE_FAILED (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_FAILED + 8) /*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8) /*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_FAILED (EVENTGROUP_RECEIVE_FAILED + 8) /*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_FAILED (EVENTGROUP_SEND_FROM_ISR_FAILED + 8) /*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_BLOCK (EVENTGROUP_RECEIVE_FROM_ISR_FAILED + 8) /*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_BLOCK (EVENTGROUP_RECEIVE_BLOCK + 8) /*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8) /*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_SUCCESS + 8)   /*0x88*/
#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0)            /*0x88*/
#define TASK_DELAY (EVENTGROUP_OTHERS + 1)                  /*0x89*/
#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2)                /*0x8A*/
#define TASK_RESUME (EVENTGROUP_OTHERS + 3)                 /*0x8B*/
#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4)        /*0x8C*/
#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5)           /*0x8D*/
#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6)       /*0x8E*/
#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7)    /*0x8F*/

#define EVENTGROUP_FTRACE_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/

#define EVENTGROUP_USEREVENT (EVENTGROUP_FTRACE_PLACEHOLDER + 8) /*0x98*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0)

/* Allow for 0-15 arguments (the number of args is added to the event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15) /*0xA7*/

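/* For example (illustrative): a user event logged with two arguments gets
   event code USER_EVENT + 2 = 0x9A, so the argument count can be recovered
   from the event code itself. */
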
/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS),
 * i.e., the time since the last stored event. The DTS fields are either 1 or
 * 2 bytes in the other events, depending on the bytes available in the event
 * struct. If the time since the last event (the DTS) is larger than allowed
 * for by the DTS field of the current event, an XTS event is inserted
 * immediately before the original event. The XTS event contains up to 3
 * additional bytes of the DTS value - the higher bytes of the true DTS value.
 * The lower 1-2 bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) of DTS data in the original
 * event, which means a limit of 0xFF (255). The XTS16 is used when the
 * original event has a 16-bit DTS field and can thereby handle values up to
 * 0xFFFF (65535).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * be at most 65535. If your time base has a higher frequency, you can define
 ******************************************************************************/

#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16) /*0xA8*/
#define XTS8 (EVENTGROUP_SYS + 0)                  /*0xA8*/
#define XTS16 (EVENTGROUP_SYS + 1)                 /*0xA9*/

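/* Worked example (illustrative): if 70000 time-base ticks elapse between two
   events and the next event has a 16-bit DTS field, an XTS16 event is stored
   first, carrying the high bytes (70000 >> 16 = 1), while the event itself
   stores the low 16 bits (70000 & 0xFFFF = 4464). */
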
#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2)   /*0xAA*/

#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3)   /*0xAB*/

#define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4)       /*0xAC*/
#define LOW_POWER_END (EVENTGROUP_SYS + 5)         /*0xAD*/

/************************************************************************/
/* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE             */
/* FUNCTIONALITY REQUESTED BY THE TRACE RECORDER                        */
/************************************************************************/

/******************************************************************************
 * TraceObjectClassTable
 * Translates a FreeRTOS QueueType into trace object classes (TRACE_CLASS_).
 * This was added since we want to map both types of Mutex and both types of
 * Semaphores on common classes for all Mutexes and all Semaphores respectively.
 *
 * FreeRTOS Queue types:
 * #define queueQUEUE_TYPE_BASE                  (0U) => TRACE_CLASS_QUEUE
 * #define queueQUEUE_TYPE_MUTEX                 (1U) => TRACE_CLASS_MUTEX
 * #define queueQUEUE_TYPE_COUNTING_SEMAPHORE    (2U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_BINARY_SEMAPHORE      (3U) => TRACE_CLASS_SEMAPHORE
 * #define queueQUEUE_TYPE_RECURSIVE_MUTEX       (4U) => TRACE_CLASS_MUTEX
 ******************************************************************************/
extern traceObjectClass TraceObjectClassTable[5];

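/* A minimal sketch of how this table could be defined (the actual definition
   is in trcKernelPort.c; shown here only to make the mapping concrete):

       traceObjectClass TraceObjectClassTable[5] = {
           TRACE_CLASS_QUEUE,      // queueQUEUE_TYPE_BASE (0U)
           TRACE_CLASS_MUTEX,      // queueQUEUE_TYPE_MUTEX (1U)
           TRACE_CLASS_SEMAPHORE,  // queueQUEUE_TYPE_COUNTING_SEMAPHORE (2U)
           TRACE_CLASS_SEMAPHORE,  // queueQUEUE_TYPE_BINARY_SEMAPHORE (3U)
           TRACE_CLASS_MUTEX       // queueQUEUE_TYPE_RECURSIVE_MUTEX (4U)
       };
*/
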
/* These functions are implemented in the .c file since certain header files
   must not be included in this one */
objectHandleType prvTraceGetObjectNumber(void* handle);
unsigned char prvTraceGetObjectType(void* handle);
objectHandleType prvTraceGetTaskNumber(void* handle);
unsigned char prvTraceIsSchedulerActive(void);
unsigned char prvTraceIsSchedulerSuspended(void);
unsigned char prvTraceIsSchedulerStarted(void);
void prvTraceEnterCritical(void);
void prvTraceExitCritical(void);
void* prvTraceGetCurrentTaskHandle(void);

/************************************************************************/
/* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER                    */
/************************************************************************/

#define TRACE_MALLOC(size) pvPortMalloc(size)

#define TRACE_ENTER_CRITICAL_SECTION() prvTraceEnterCritical();
#define TRACE_EXIT_CRITICAL_SECTION() prvTraceExitCritical();

#define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()
#define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()
#define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)
#define TRACE_GET_TASK_NUMBER(pxTCB) (prvTraceGetTaskNumber(pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) pxTCB->uxTaskNumber = xTraceGetObjectHandle(TRACE_CLASS_TASK);

#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))

#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->ucQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));

#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)

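/* Expansion sketch (illustrative): for a successful send on a plain queue,
   TRACE_GET_OBJECT_EVENT_CODE(SEND, SUCCESS, UNUSED, pxQueue) pastes the
   tokens into EVENTGROUP_SEND_SUCCESS and adds the object's trace class,
   i.e. 0x20 + TRACE_CLASS_QUEUE = 0x20. */
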
/************************************************************************/
/* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL         */
/************************************************************************/

#if (configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
    { \
        extern uint32_t trace_disable_timestamp; \
        vTraceStoreLowPower(0); \
        trace_disable_timestamp = 1; \
    }

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
    { \
        extern uint32_t trace_disable_timestamp; \
        trace_disable_timestamp = 0; \
        vTraceStoreLowPower(1); \
    }

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
#define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xCount; }

#endif /* configUSE_TICKLESS_IDLE != 0 */

/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
    trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is
   called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK
#define traceTASK_INCREMENT_TICK( xTickCount ) \
    if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
    if (uxSchedulerSuspended == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
    trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
    trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called on vTaskDelay - note the use of the FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
    trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxCurrentTCB); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called on vTaskDelayUntil - note the use of the FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#define traceTASK_DELAY_UNTIL() \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
    trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxCurrentTCB); \
    TRACE_EXIT_CRITICAL_SECTION();

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
    trcKERNEL_HOOKS_TASK_DELETE(DELETE, pxTaskToDelete);
#endif

#if (INCLUDE_OBJECT_DELETE == 1)
/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_DELETE(DELETE, UNUSED, pxQueue); \
    TRACE_EXIT_CRITICAL_SECTION();
#endif

/* Called in xTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
    if (pxNewTCB != NULL) \
    { \
        trcKERNEL_HOOKS_TASK_CREATE(CREATE, pxNewTCB); \
    }

/* Called in xTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueType); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueQUEUE_TYPE_MUTEX); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called when the mutex can not be given, since the calling task is not the holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called when a message is sent to a queue */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1)); /* For mutexes, store the new owner rather than the queue length */

/* Called when a message failed to be sent to a queue (timeout) */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1)); /* For mutexes, store the new owner rather than the queue length */

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue); \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
    TRACE_ENTER_CRITICAL_SECTION(); \
    trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
    if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
    { \
        trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxQueue); \
    } \
    TRACE_EXIT_CRITICAL_SECTION();

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(PEEK, SUCCESS, UNUSED, pxQueue);

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
    trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
    trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
    trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
    trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in xTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
    trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);

/************************************************************************/
/* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE         */
/************************************************************************/

/* Returns the exclude state of the object */
uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);

#define TRACE_SET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, queueIndex)
#define TRACE_GET_QUEUE_FLAG_ISEXCLUDED(queueIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, queueIndex)

#define TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)
#define TRACE_GET_SEMAPHORE_FLAG_ISEXCLUDED(semaphoreIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+semaphoreIndex)

#define TRACE_SET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)
#define TRACE_GET_MUTEX_FLAG_ISEXCLUDED(mutexIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+mutexIndex)

#define TRACE_SET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
#define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)

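/* Illustrative bit layout of excludedObjects (values assumed, set in
   trcConfig.h): with NQueue = 10 and NSemaphore = 10, queue handle 3 maps to
   flag index 3, semaphore handle 3 to 10 + 1 + 3 = 14, and mutex handle 3 to
   10 + 1 + 10 + 1 + 3 = 25. The "+1" offsets leave one unused flag per object
   class, apparently because object handles are 1-based. */
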
#define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
    switch (objectclass) \
    { \
    case TRACE_CLASS_QUEUE: \
        TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_SEMAPHORE: \
        TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_MUTEX: \
        TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_TASK: \
        TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
        break; \
    }

#define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
    switch (objectclass) \
    { \
    case TRACE_CLASS_QUEUE: \
        TRACE_SET_QUEUE_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_SEMAPHORE: \
        TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_MUTEX: \
        TRACE_SET_MUTEX_FLAG_ISEXCLUDED(handle); \
        break; \
    case TRACE_CLASS_TASK: \
        TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
        break; \
    }

#define vTraceExcludeTaskFromTrace(handle) \
    TRACE_SET_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

#define vTraceIncludeTaskInTrace(handle) \
    TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(TRACE_GET_TASK_NUMBER(handle));

#define vTraceExcludeQueueFromTrace(handle) \
    TRACE_SET_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeQueueInTrace(handle) \
    TRACE_CLEAR_QUEUE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceExcludeSemaphoreFromTrace(handle) \
    TRACE_SET_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeSemaphoreInTrace(handle) \
    TRACE_CLEAR_SEMAPHORE_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceExcludeMutexFromTrace(handle) \
    TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

#define vTraceIncludeMutexInTrace(handle) \
    TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));

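/* Usage sketch (hypothetical application code; the queue handle type alias
   depends on your FreeRTOS version):

       xQueueHandle xLogQueue = xQueueCreate(32, sizeof(uint32_t));
       vTraceExcludeQueueFromTrace(xLogQueue);   // stop tracing this queue
       ...
       vTraceIncludeQueueInTrace(xLogQueue);     // resume tracing it
*/
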
/* Kernel Services */
#define vTraceExcludeKernelServiceDelayFromTrace() \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

#define vTraceIncludeKernelServiceDelayInTrace() \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(TASK_DELAY_UNTIL);

/* HELPER MACROS FOR KERNEL SERVICES FOR OBJECTS */
#define vTraceExcludeKernelServiceSendFromTrace_HELPER(class) \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceSendInTrace_HELPER(class) \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_SUCCESS + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_BLOCK + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FAILED + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_SUCCESS + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_SEND_FROM_ISR_FAILED + class);

#define vTraceExcludeKernelServiceReceiveFromTrace_HELPER(class) \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
    TRACE_SET_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

#define vTraceIncludeKernelServiceReceiveInTrace_HELPER(class) \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_SUCCESS + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_BLOCK + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FAILED + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + class); \
    TRACE_CLEAR_EVENT_CODE_FLAG_ISEXCLUDED(EVENTGROUP_RECEIVE_FROM_ISR_FAILED + class);

/* EXCLUDE AND INCLUDE FOR QUEUE */
#define vTraceExcludeKernelServiceQueueSendFromTrace() \
    vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueSendInTrace() \
    vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceExcludeKernelServiceQueueReceiveFromTrace() \
    vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_QUEUE);

#define vTraceIncludeKernelServiceQueueReceiveInTrace() \
    vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_QUEUE);

/* EXCLUDE AND INCLUDE FOR SEMAPHORE */
#define vTraceExcludeKernelServiceSemaphoreSendFromTrace() \
    vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreSendInTrace() \
    vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceExcludeKernelServiceSemaphoreReceiveFromTrace() \
    vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_SEMAPHORE);

#define vTraceIncludeKernelServiceSemaphoreReceiveInTrace() \
    vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_SEMAPHORE);

/* EXCLUDE AND INCLUDE FOR MUTEX */
#define vTraceExcludeKernelServiceMutexSendFromTrace() \
    vTraceExcludeKernelServiceSendFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexSendInTrace() \
    vTraceIncludeKernelServiceSendInTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceExcludeKernelServiceMutexReceiveFromTrace() \
    vTraceExcludeKernelServiceReceiveFromTrace_HELPER(TRACE_CLASS_MUTEX);

#define vTraceIncludeKernelServiceMutexReceiveInTrace() \
    vTraceIncludeKernelServiceReceiveInTrace_HELPER(TRACE_CLASS_MUTEX);

/************************************************************************/
/* KERNEL SPECIFIC MACROS TO NAME OBJECTS, IF NECESSARY                 */
/************************************************************************/
#define vTraceSetQueueName(object, name) \
    vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

#define vTraceSetSemaphoreName(object, name) \
    vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

#define vTraceSetMutexName(object, name) \
    vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);

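/* Usage sketch (hypothetical handle names): naming objects makes them easier
   to identify in the trace views:

       vTraceSetQueueName(xCommandQueue, "CommandQueue");
       vTraceSetSemaphoreName(xUartSemaphore, "UartSemaphore");
       vTraceSetMutexName(xSpiMutex, "SpiMutex");
*/
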
#endif /* USE_TRACEALYZER_RECORDER == 1 */

#endif /* TRCKERNELPORT_H_ */