@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Tracealyzer v2.5.0 Recorder Library
+ * Tracealyzer v2.6.0 Recorder Library
 * Percepio AB, www.percepio.com
 *
 * trcKernelPort.h
@@ -40,6 +40,9 @@
 #define TRCKERNELPORT_H_
 #include "FreeRTOS.h" // Defines configUSE_TRACE_FACILITY
+#include "trcHardwarePort.h"
+extern int uiInEventGroupSetBitsFromISR;
 #define USE_TRACEALYZER_RECORDER configUSE_TRACE_FACILITY
@@ -48,18 +51,42 @@
 /* Defines that must be set for the recorder to work properly */
 #define TRACE_KERNEL_VERSION 0x1AA1
 #define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
 #define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */
+#if ((SELECTED_PORT == PORT_ARM_CortexM) && (USE_PRIMASK_CS == 1))
+	#define TRACE_SR_ALLOC_CRITICAL_SECTION() int __irq_status;
+	#define TRACE_ENTER_CRITICAL_SECTION() { __irq_status = prvTraceGetIRQMask(); prvTraceDisableIRQ(); }
+	#define TRACE_EXIT_CRITICAL_SECTION() { prvTraceSetIRQMask(__irq_status); }
+	#define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_BEGIN
+	#define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY trcCRITICAL_SECTION_END
+#else
+	#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
+	#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
+	#define trcCRITICAL_SECTION_BEGIN_ON_CORTEX_M_ONLY() recorder_busy++;
+	#define trcCRITICAL_SECTION_END_ON_CORTEX_M_ONLY() recorder_busy--;
+#endif
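/* A minimal usage sketch (editor's illustration; prvExampleRecorderUpdate is a
   hypothetical helper, and it assumes TRACE_SR_ALLOC_CRITICAL_SECTION() expands
   to nothing on the non-Cortex-M branch): declare the saved IRQ mask, disable
   interrupts, touch the recorder data, then restore the mask. */
/*
static void prvExampleRecorderUpdate(void)
{
	TRACE_SR_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();
	// ... update recorder data structures atomically ...
	TRACE_EXIT_CRITICAL_SECTION();
}
*/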
 /************************************************************************/
 /* KERNEL SPECIFIC OBJECT CONFIGURATION                                 */
 /************************************************************************/
-#define TRACE_NCLASSES 5
+#define TRACE_NCLASSES 7
 #define TRACE_CLASS_QUEUE ((traceObjectClass)0)
 #define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
 #define TRACE_CLASS_MUTEX ((traceObjectClass)2)
 #define TRACE_CLASS_TASK ((traceObjectClass)3)
 #define TRACE_CLASS_ISR ((traceObjectClass)4)
+#define TRACE_CLASS_TIMER ((traceObjectClass)5)
+#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)
-#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR)
+#define TRACE_KERNEL_OBJECT_COUNT (NQueue + NSemaphore + NMutex + NTask + NISR + NTimer + NEventGroup)
 /* The size of the Object Property Table entries, in bytes, per object */
@@ -82,22 +109,30 @@
 Byte 1: state (if already active) */
 #define PropertyTableSizeISR (NameLenISR + 2)
+/* NTimer properties: Byte 0: state (unused for now) */
+#define PropertyTableSizeTimer (NameLenTimer + 1)
+/* NEventGroup properties: Byte 0-3: state (unused for now) */
+#define PropertyTableSizeEventGroup (NameLenEventGroup + 4)
 /* The layout of the byte array representing the Object Property Table */
 #define StartIndexQueue 0
 #define StartIndexSemaphore StartIndexQueue + NQueue * PropertyTableSizeQueue
 #define StartIndexMutex StartIndexSemaphore + NSemaphore * PropertyTableSizeSemaphore
 #define StartIndexTask StartIndexMutex + NMutex * PropertyTableSizeMutex
 #define StartIndexISR StartIndexTask + NTask * PropertyTableSizeTask
+#define StartIndexTimer StartIndexISR + NISR * PropertyTableSizeISR
+#define StartIndexEventGroup StartIndexTimer + NTimer * PropertyTableSizeTimer
 /* Number of bytes used by the object table */
-#define TRACE_OBJECT_TABLE_SIZE StartIndexISR + NISR * PropertyTableSizeISR
+#define TRACE_OBJECT_TABLE_SIZE StartIndexEventGroup + NEventGroup * PropertyTableSizeEventGroup
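/* Worked example (hypothetical sizes, for illustration only): with NTimer = 4
   and NameLenTimer = 15, each timer entry occupies NameLenTimer + 1 = 16
   bytes, so the timer region spans 64 bytes from StartIndexTimer, and
   StartIndexEventGroup begins immediately after it at StartIndexTimer + 64. */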
 /* Includes */
+#include "trcConfig.h" /* Must be first, even before trcTypes.h */
+#include "trcHardwarePort.h"
 #include "trcTypes.h"
-#include "trcConfig.h"
 #include "trcKernelHooks.h"
-#include "trcHardwarePort.h"
 #include "trcBase.h"
 #include "trcKernel.h"
 #include "trcUser.h"
@@ -187,7 +222,7 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 * The lower three bits in the event code give the object class, i.e., the type
 * of create operation (task, queue, semaphore, etc.).
 ******************************************************************************/
-#define EVENTGROUP_CREATE_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/
+#define EVENTGROUP_CREATE_OBJ_SUCCESS (EVENTGROUP_OBJCLOSE_PROP + 8) /*0x18*/
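/* Worked example: a successful mutex creation is coded as
   EVENTGROUP_CREATE_OBJ_SUCCESS + TRACE_CLASS_MUTEX = 0x18 + 2 = 0x1A, and
   the host tool recovers the class from the low bits: 0x1A & 0x07 == 2. */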
 /*******************************************************************************
 * EVENTGROUP_SEND
@@ -197,7 +232,7 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
-#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_SUCCESS + 8) /*0x20*/
+#define EVENTGROUP_SEND_SUCCESS (EVENTGROUP_CREATE_OBJ_SUCCESS + 8) /*0x20*/
 /*******************************************************************************
 * EVENTGROUP_RECEIVE
@@ -219,10 +254,10 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 #define EVENTGROUP_KSE_FAILED (EVENTGROUP_RECEIVE_FROM_ISR_SUCCESS + 8) /*0x40*/
 /* Failed create calls - memory allocation failed */
-#define EVENTGROUP_CREATE_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/
+#define EVENTGROUP_CREATE_OBJ_FAILED (EVENTGROUP_KSE_FAILED) /*0x40*/
 /* Failed send/give - timeout! */
-#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_FAILED + 8) /*0x48*/
+#define EVENTGROUP_SEND_FAILED (EVENTGROUP_CREATE_OBJ_FAILED + 8) /*0x48*/
 /* Failed receive/take - timeout! */
 #define EVENTGROUP_RECEIVE_FAILED (EVENTGROUP_SEND_FAILED + 8) /*0x50*/
@@ -245,10 +280,10 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 #define EVENTGROUP_PEEK_SUCCESS (EVENTGROUP_SEND_BLOCK + 8) /*0x78*/
 /* Events on object delete (vTaskDelete or vQueueDelete) */
-#define EVENTGROUP_DELETE_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/
+#define EVENTGROUP_DELETE_OBJ_SUCCESS (EVENTGROUP_PEEK_SUCCESS + 8) /*0x80*/
 /* Other events - object class is implied: TASK */
-#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_SUCCESS + 8) /*0x88*/
+#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_OBJ_SUCCESS + 8) /*0x88*/
 #define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0) /*0x88*/
 #define TASK_DELAY (EVENTGROUP_OTHERS + 1) /*0x89*/
 #define TASK_SUSPEND (EVENTGROUP_OTHERS + 2) /*0x8A*/
@@ -258,11 +293,18 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 #define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6) /*0x8E*/
 #define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7) /*0x8F*/
 /* Not yet used */
-#define EVENTGROUP_FTRACE_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/
+#define EVENTGROUP_MISC_PLACEHOLDER (EVENTGROUP_OTHERS + 8) /*0x90*/
+#define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER + 0) /*0x90*/
+#define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER + 1) /*0x91*/
+#define PEND_FUNC_CALL_FAILED (EVENTGROUP_MISC_PLACEHOLDER + 2) /*0x92*/
+#define PEND_FUNC_CALL_FROM_ISR_FAILED (EVENTGROUP_MISC_PLACEHOLDER + 3) /*0x93*/
+#define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER + 4) /*0x94*/
+#define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER + 5) /*0x95*/
+#define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER + 6) /*0x96*/
+#define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER + 7) /*0x97*/
 /* User events */
-#define EVENTGROUP_USEREVENT (EVENTGROUP_FTRACE_PLACEHOLDER + 8) /*0x98*/
+#define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8) /*0x98*/
 #define USER_EVENT (EVENTGROUP_USEREVENT + 0)
 /* Allow for 0-15 arguments (the number of args is added to event code) */
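/* Worked example: a user event carrying 3 arguments is stored as
   USER_EVENT + 3 = 0x98 + 3 = 0x9B; the decoder subtracts USER_EVENT
   to recover the argument count. */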
@@ -300,7 +342,47 @@ const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);
 #define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4) /*0xAC*/
 #define LOW_POWER_END (EVENTGROUP_SYS + 5) /*0xAD*/
 #define XID (EVENTGROUP_SYS + 6) /*0xAE*/
 #define XTS16L (EVENTGROUP_SYS + 7) /*0xAF*/
+#define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8) /*0xB0*/
+#define TIMER_CREATE (EVENTGROUP_TIMER + 0) /*0xB0*/
+#define TIMER_START (EVENTGROUP_TIMER + 1) /*0xB1*/
+#define TIMER_RESET (EVENTGROUP_TIMER + 2) /*0xB2*/
+#define TIMER_STOP (EVENTGROUP_TIMER + 3) /*0xB3*/
+#define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4) /*0xB4*/
+#define TIMER_DELETE (EVENTGROUP_TIMER + 5) /*0xB5*/
+#define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6) /*0xB6*/
+#define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7) /*0xB7*/
+#define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8) /*0xB8*/
+#define TIMER_CREATE_FAILED (EVENTGROUP_TIMER + 9) /*0xB9*/
+#define TIMER_START_FAILED (EVENTGROUP_TIMER + 10) /*0xBA*/
+#define TIMER_RESET_FAILED (EVENTGROUP_TIMER + 11) /*0xBB*/
+#define TIMER_STOP_FAILED (EVENTGROUP_TIMER + 12) /*0xBC*/
+#define TIMER_CHANGE_PERIOD_FAILED (EVENTGROUP_TIMER + 13) /*0xBD*/
+#define TIMER_DELETE_FAILED (EVENTGROUP_TIMER + 14) /*0xBE*/
+#define TIMER_START_FROM_ISR_FAILED (EVENTGROUP_TIMER + 15) /*0xBF*/
+#define TIMER_RESET_FROM_ISR_FAILED (EVENTGROUP_TIMER + 16) /*0xC0*/
+#define TIMER_STOP_FROM_ISR_FAILED (EVENTGROUP_TIMER + 17) /*0xC1*/
+#define EVENTGROUP_EG (EVENTGROUP_TIMER + 18) /*0xC2*/
+#define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0) /*0xC2*/
+#define EVENT_GROUP_CREATE_FAILED (EVENTGROUP_EG + 1) /*0xC3*/
+#define EVENT_GROUP_SYNC_BLOCK (EVENTGROUP_EG + 2) /*0xC4*/
+#define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3) /*0xC5*/
+#define EVENT_GROUP_WAIT_BITS_BLOCK (EVENTGROUP_EG + 4) /*0xC6*/
+#define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5) /*0xC7*/
+#define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6) /*0xC8*/
+#define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7) /*0xC9*/
+#define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8) /*0xCA*/
+#define EVENT_GROUP_DELETE (EVENTGROUP_EG + 9) /*0xCB*/
+#define EVENT_GROUP_SYNC_END_FAILED (EVENTGROUP_EG + 10) /*0xCC*/
+#define EVENT_GROUP_WAIT_BITS_END_FAILED (EVENTGROUP_EG + 11) /*0xCD*/
+#define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12) /*0xCE*/
+#define EVENT_GROUP_SET_BITS_FROM_ISR_FAILED (EVENTGROUP_EG + 13) /*0xCF*/
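/* Consistency check of the layout above: EVENTGROUP_EG = EVENTGROUP_TIMER + 18
   = 0xB0 + 0x12 = 0xC2, so the event group codes run from 0xC2 through 0xCF
   without overlapping the timer range. */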
 /************************************************************************/
 /* KERNEL SPECIFIC DATA AND FUNCTIONS NEEDED TO PROVIDE THE             */
@@ -330,20 +412,18 @@ objectHandleType prvTraceGetTaskNumber(void* handle);
 unsigned char prvTraceIsSchedulerActive(void);
 unsigned char prvTraceIsSchedulerSuspended(void);
 unsigned char prvTraceIsSchedulerStarted(void);
-void prvTraceEnterCritical(void);
-void prvTraceExitCritical(void);
 void* prvTraceGetCurrentTaskHandle(void);
+#if (configUSE_TIMERS == 1)
+#undef INCLUDE_xTimerGetTimerDaemonTaskHandle
+#define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
+#endif
 /************************************************************************/
 /* KERNEL SPECIFIC MACROS USED BY THE TRACE RECORDER                    */
 /************************************************************************/
 #define TRACE_MALLOC(size) pvPortMalloc(size)
-#define TRACE_ENTER_CRITICAL_SECTION() prvTraceEnterCritical();
-#define TRACE_EXIT_CRITICAL_SECTION() prvTraceExitCritical();
 #define TRACE_IS_SCHEDULER_ACTIVE() prvTraceIsSchedulerActive()
 #define TRACE_IS_SCHEDULER_STARTED() prvTraceIsSchedulerStarted()
 #define TRACE_IS_SCHEDULER_SUSPENDED() prvTraceIsSchedulerSuspended()
@@ -357,12 +437,20 @@ void* prvTraceGetCurrentTaskHandle(void);
 #define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TraceObjectClassTable[kernelClass]
 #define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_CLASS_TRACE_CLASS(CLASS, prvTraceGetObjectType(pxObject))
+#define TRACE_GET_TIMER_NUMBER(tmr) ((objectHandleType)((Timer_t*)tmr)->uxTimerNumber)
+#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = xTraceGetObjectHandle(TRACE_CLASS_TIMER);
+#define TRACE_GET_TIMER_NAME(pxTimer) pxTimer->pcTimerName
+#define TRACE_GET_TIMER_PERIOD(pxTimer) pxTimer->xTimerPeriodInTicks
+#define TRACE_GET_EVENTGROUP_NUMBER(eg) ((objectHandleType)uxEventGroupGetNumber(eg))
+#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = xTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP);
 #define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) (prvTraceGetObjectNumber(pxObject))
 #define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) pxObject->uxQueueNumber = xTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject));
 #define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass))
 #define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject))
-#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
+#define TRACE_GET_TASK_EVENT_CODE(SERVICE, RESULT, CLASS, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
 /************************************************************************/
 /* KERNEL SPECIFIC WRAPPERS THAT SHOULD BE CALLED BY THE KERNEL         */
@@ -386,17 +474,18 @@ void* prvTraceGetCurrentTaskHandle(void);
 	vTraceStoreLowPower(1); \
 }
-/* A macro that will update the tick count when returning from tickless idle */
-#undef traceINCREASE_TICK_COUNT( xCount )
-#define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xTickCount; }
 #endif
+/* A macro that will update the tick count when returning from tickless idle */
+#undef traceINCREASE_TICK_COUNT
+/* Note: This can handle time adjustments of max 2^32 cycles, i.e., 35 seconds at 120 MHz. Thus, tickless idle periods longer than 2^32 cycles will appear "compressed" on the time line. */
+#define traceINCREASE_TICK_COUNT( xCount ) { DWT_CYCLES_ADDED += (xCount * (TRACE_CPU_CLOCK_HZ / TRACE_TICK_RATE_HZ)); }
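/* Worked example: with TRACE_CPU_CLOCK_HZ = 120000000 and TRACE_TICK_RATE_HZ
   = 1000, each skipped tick adds 120000 cycles to DWT_CYCLES_ADDED; the
   32-bit sum wraps after 2^32 / 120 MHz, roughly 35.8 seconds. */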
 /* Called for each task that becomes ready */
 #undef traceMOVED_TASK_TO_READY_STATE
 #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
 	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);
 /* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
 #undef traceTASK_INCREMENT_TICK
 #define traceTASK_INCREMENT_TICK( xTickCount ) \
@@ -411,38 +500,38 @@ void* prvTraceGetCurrentTaskHandle(void);
 /* Called on vTaskSuspend */
 #undef traceTASK_SUSPEND
 #define traceTASK_SUSPEND( pxTaskToSuspend ) \
 	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);
 /* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
 #undef traceTASK_DELAY
 #define traceTASK_DELAY() \
 	TRACE_ENTER_CRITICAL_SECTION(); \
 	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
-	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxCurrentTCB); \
+	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \
 	TRACE_EXIT_CRITICAL_SECTION();
 /* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
 #undef traceTASK_DELAY_UNTIL
 #define traceTASK_DELAY_UNTIL() \
 	TRACE_ENTER_CRITICAL_SECTION(); \
 	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
-	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxCurrentTCB); \
+	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \
 	TRACE_EXIT_CRITICAL_SECTION();
 #if (INCLUDE_OBJECT_DELETE == 1)
 /* Called on vTaskDelete */
 #undef traceTASK_DELETE
 #define traceTASK_DELETE( pxTaskToDelete ) \
-	trcKERNEL_HOOKS_TASK_DELETE(DELETE, pxTaskToDelete);
+	{ TRACE_SR_ALLOC_CRITICAL_SECTION(); \
+	TRACE_ENTER_CRITICAL_SECTION(); \
+	trcKERNEL_HOOKS_TASK_DELETE(DELETE_OBJ, pxTaskToDelete); \
+	TRACE_EXIT_CRITICAL_SECTION(); }
 #endif
 #if (INCLUDE_OBJECT_DELETE == 1)
 /* Called on vQueueDelete */
 #undef traceQUEUE_DELETE
 #define traceQUEUE_DELETE( pxQueue ) \
+	{ TRACE_SR_ALLOC_CRITICAL_SECTION(); \
 	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_OBJECT_DELETE(DELETE, UNUSED, pxQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_OBJECT_DELETE(DELETE_OBJ, UNUSED, pxQueue); \
+	TRACE_EXIT_CRITICAL_SECTION(); }
 #endif
 /* Called on vTaskCreate */
@@ -450,94 +539,72 @@ void* prvTraceGetCurrentTaskHandle(void);
 #define traceTASK_CREATE(pxNewTCB) \
 	if (pxNewTCB != NULL) \
 	{ \
-		trcKERNEL_HOOKS_TASK_CREATE(CREATE, pxNewTCB); \
+		trcKERNEL_HOOKS_TASK_CREATE(CREATE_OBJ, UNUSED, pxNewTCB); \
 	}
 /* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
 #undef traceTASK_CREATE_FAILED
 #define traceTASK_CREATE_FAILED() \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_TASK_CREATE_FAILED(CREATE_OBJ, UNUSED);
 /* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
 #undef traceQUEUE_CREATE
 #define traceQUEUE_CREATE( pxNewQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);
 /* Called in xQueueCreate, if the queue creation fails */
 #undef traceQUEUE_CREATE_FAILED
 #define traceQUEUE_CREATE_FAILED( queueType ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueType); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueType);
 /* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
 #undef traceCREATE_MUTEX
 #define traceCREATE_MUTEX( pxNewQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE, UNUSED, pxNewQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_OBJECT_CREATE(CREATE_OBJ, UNUSED, pxNewQueue);
 /* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
 #undef traceCREATE_MUTEX_FAILED
 #define traceCREATE_MUTEX_FAILED() \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE, UNUSED, queueQUEUE_TYPE_MUTEX); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_OBJECT_CREATE_FAILED(CREATE_OBJ, UNUSED, queueQUEUE_TYPE_MUTEX);
 /* Called when the mutex can not be given, since the caller is not the holder */
 #undef traceGIVE_MUTEX_RECURSIVE_FAILED
 #define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxMutex);
-/* Called when a message is sent to a queue */
+/* Called when a message is sent to a queue */ /* CS IS NEW ! */
 #undef traceQUEUE_SEND
 #define traceQUEUE_SEND( pxQueue ) \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, SUCCESS, UNUSED, pxQueue); \
-	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1)); /* For mutex, store the new owner rather than queue length */
+	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1));
 /* Called when a message failed to be sent to a queue (timeout) */
 #undef traceQUEUE_SEND_FAILED
 #define traceQUEUE_SEND_FAILED( pxQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, FAILED, UNUSED, pxQueue);
 /* Called when the task is blocked due to a send operation on a full queue */
 #undef traceBLOCKING_ON_QUEUE_SEND
 #define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND, BLOCK, UNUSED, pxQueue);
 /* Called when a message is received from a queue */
 #undef traceQUEUE_RECEIVE
 #define traceQUEUE_RECEIVE( pxQueue ) \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, SUCCESS, UNUSED, pxQueue); \
-	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1)); /* For mutex, store the new owner rather than queue length */
+	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) == TRACE_CLASS_MUTEX ? TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1));
 /* Called when a receive operation on a queue fails (timeout) */
 #undef traceQUEUE_RECEIVE_FAILED
 #define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
-	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue); \
-	TRACE_EXIT_CRITICAL_SECTION();
+	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, FAILED, UNUSED, pxQueue);
 /* Called when the task is blocked due to a receive operation on an empty queue */
 #undef traceBLOCKING_ON_QUEUE_RECEIVE
 #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
-	TRACE_ENTER_CRITICAL_SECTION(); \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE, BLOCK, UNUSED, pxQueue); \
 	if (TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, pxQueue) != TRACE_CLASS_MUTEX) \
-	{ \
-		trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(UNUSED, pxQueue); \
-	} \
-	TRACE_EXIT_CRITICAL_SECTION();
+	{ trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); }
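/* Worked example of the state byte stored by the SET_OBJECT_STATE calls
   above: sending to a queue already holding 2 messages stores
   (uint8_t)(2 + 1) = 3, while giving a mutex stores 0, since for mutexes
   this byte tracks the owning task rather than a fill level. */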
 /* Called on xQueuePeek */
 #undef traceQUEUE_PEEK
@@ -554,43 +621,138 @@ void* prvTraceGetCurrentTaskHandle(void);
 #undef traceQUEUE_SEND_FROM_ISR_FAILED
 #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(SEND_FROM_ISR, FAILED, UNUSED, pxQueue);
 /* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
 #undef traceQUEUE_RECEIVE_FROM_ISR
 #define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, SUCCESS, UNUSED, pxQueue); \
 	trcKERNEL_HOOKS_SET_OBJECT_STATE(UNUSED, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1));
 /* Called when a message receive from interrupt context fails (since the queue was empty) */
 #undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
 #define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
 	trcKERNEL_HOOKS_KERNEL_SERVICE(RECEIVE_FROM_ISR, FAILED, UNUSED, pxQueue);
 /* Called in vTaskPrioritySet */
 #undef traceTASK_PRIORITY_SET
 #define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
 	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);
 /* Called in vTaskPriorityInherit, which is called by Mutex operations */
 #undef traceTASK_PRIORITY_INHERIT
 #define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
 	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);
 /* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
 #undef traceTASK_PRIORITY_DISINHERIT
 #define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
 	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);
 /* Called in vTaskResume */
 #undef traceTASK_RESUME
 #define traceTASK_RESUME( pxTaskToResume ) \
 	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);
 /* Called in vTaskResumeFromISR */
 #undef traceTASK_RESUME_FROM_ISR
 #define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
 	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
+/* Called in timer.c - xTimerCreate */
+#undef traceTIMER_CREATE
+#define traceTIMER_CREATE(tmr) \
+	trcKERNEL_HOOKS_TIMER_CREATE(TIMER_CREATE, tmr);
+#undef traceTIMER_CREATE_FAILED
+#define traceTIMER_CREATE_FAILED() \
+	trcKERNEL_HOOKS_TIMER_EVENT(TIMER_CREATE_FAILED, 0);
+/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
+#undef traceTIMER_COMMAND_SEND
+#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
+	if (xCommandID > tmrCOMMAND_START_DONT_TRACE) { \
+		if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) vTraceStoreKernelCallWithParam((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_FAILED, TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), xOptionalValue); \
+		else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)) { trcKERNEL_HOOKS_TIMER_DELETE(TIMER_DELETE, tmr); } \
+		else { trcKERNEL_HOOKS_TIMER_EVENT(EVENTGROUP_TIMER + xCommandID + ((xReturn == pdPASS) ? 0 : (TIMER_CREATE_FAILED - TIMER_CREATE)), tmr); } \
+	}
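/* Worked example of the offset arithmetic above: for xCommandID =
   tmrCOMMAND_START (1), a pdPASS result gives EVENTGROUP_TIMER + 1 =
   TIMER_START (0xB1); on failure, adding TIMER_CREATE_FAILED - TIMER_CREATE
   = 9 yields TIMER_START_FAILED (0xBA) instead. */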
+#undef tracePEND_FUNC_CALL
+#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
+	if (ret == pdPASS) \
+		vTraceStoreKernelCall(PEND_FUNC_CALL, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle())); \
+	else \
+		vTraceStoreKernelCall(PEND_FUNC_CALL_FAILED, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle()));
+#undef tracePEND_FUNC_CALL_FROM_ISR
+#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
+	if (!uiInEventGroupSetBitsFromISR) vTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, uxTaskGetTaskNumber(xTimerGetTimerDaemonTaskHandle())); \
+	uiInEventGroupSetBitsFromISR = 0;
+#undef traceEVENT_GROUP_CREATE
+#define traceEVENT_GROUP_CREATE(eg) \
+	TRACE_SET_EVENTGROUP_NUMBER(eg); \
+	vTraceStoreKernelCall(EVENT_GROUP_CREATE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));
+#undef traceEVENT_GROUP_DELETE
+#define traceEVENT_GROUP_DELETE(eg) \
+	vTraceStoreKernelCall(EVENT_GROUP_DELETE, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg)); \
+	vTraceStoreObjectNameOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
+	vTraceStoreObjectPropertiesOnCloseEvent(TRACE_GET_EVENTGROUP_NUMBER(eg), TRACE_CLASS_EVENTGROUP); \
+	vTraceFreeObjectHandle(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg));
+#undef traceEVENT_GROUP_CREATE_FAILED
+#define traceEVENT_GROUP_CREATE_FAILED() \
+	vTraceStoreKernelCall(EVENT_GROUP_CREATE_FAILED, TRACE_CLASS_EVENTGROUP, 0);
+#undef traceEVENT_GROUP_SYNC_BLOCK
+#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
+	vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor);
+#undef traceEVENT_GROUP_SYNC_END
+#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
+	if (wasTimeout) { vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
+	else { vTraceStoreKernelCallWithParam(EVENT_GROUP_SYNC_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }
+#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
+#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
+	vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_BLOCK, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); \
+	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
+#undef traceEVENT_GROUP_WAIT_BITS_END
+#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
+	if (wasTimeout) { vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END_FAILED, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); } \
+	else { vTraceStoreKernelCallWithParam(EVENT_GROUP_WAIT_BITS_END, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToWaitFor); }
+#undef traceEVENT_GROUP_CLEAR_BITS
+#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
+	if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);
+#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
+#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
+	if (bitsToClear) vTraceStoreKernelCallWithParam(EVENT_GROUP_CLEAR_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToClear);
+#undef traceEVENT_GROUP_SET_BITS
+#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
+	vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet);
+#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
+#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
+	vTraceStoreKernelCallWithParam(EVENT_GROUP_SET_BITS_FROM_ISR, TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(eg), bitsToSet); \
+	uiInEventGroupSetBitsFromISR = 1;
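/* Note on the uiInEventGroupSetBitsFromISR flag: xEventGroupSetBitsFromISR()
   is implemented on top of xTimerPendFunctionCallFromISR(), so each call
   would otherwise also be logged as PEND_FUNC_CALL_FROM_ISR. Setting the flag
   here makes tracePEND_FUNC_CALL_FROM_ISR (above) suppress that duplicate
   event and then clear the flag again. */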
+#if (INCLUDE_MEMMANG_EVENTS == 1)
+extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, uint32_t size);
+#undef traceMALLOC
+#define traceMALLOC( pvAddress, uiSize ) { vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, (uint32_t)pvAddress, uiSize); TRACE_UPDATE_HEAP_USAGE_POSITIVE(uiSize); }
+#undef traceFREE
+#define traceFREE( pvAddress, uiSize ) { vTraceStoreMemMangEvent(MEM_FREE_SIZE, (uint32_t)pvAddress, uiSize); TRACE_UPDATE_HEAP_USAGE_NEGATIVE(uiSize); }
+#endif
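/* Usage sketch: with these hooks active, a call such as pvPortMalloc(32)
   emits a MEM_MALLOC_SIZE event carrying the block address and size, and
   vPortFree() emits the matching MEM_FREE_SIZE event, letting the host tool
   plot heap usage over time. */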
 /************************************************************************/
 /* KERNEL SPECIFIC MACROS TO EXCLUDE OR INCLUDE THINGS IN TRACE         */
@@ -615,6 +777,15 @@ uint8_t uiTraceIsObjectExcluded(traceObjectClass objectclass, objectHandleType handle);
 #define TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
 #define TRACE_GET_TASK_FLAG_ISEXCLUDED(taskIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+taskIndex)
+#define TRACE_SET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
+#define TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
+#define TRACE_GET_TIMER_FLAG_ISEXCLUDED(timerIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+timerIndex)
+#define TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_SET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
+#define TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_CLEAR_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
+#define TRACE_GET_EVENTGROUP_FLAG_ISEXCLUDED(egIndex) TRACE_GET_FLAG_ISEXCLUDED(excludedObjects, NQueue+1+NSemaphore+1+NMutex+1+NTask+1+NTimer+1+egIndex)
 #define TRACE_CLEAR_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
 	switch (objectclass) \
 	{ \
@@ -630,6 +801,12 @@ case TRACE_CLASS_MUTEX: \
 	case TRACE_CLASS_TASK: \
 		TRACE_CLEAR_TASK_FLAG_ISEXCLUDED(handle); \
 		break; \
+	case TRACE_CLASS_TIMER: \
+		TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(handle); \
+		break; \
+	case TRACE_CLASS_EVENTGROUP: \
+		TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
+		break; \
 	}
 #define TRACE_SET_OBJECT_FLAG_ISEXCLUDED(objectclass, handle) \
@@ -647,6 +824,12 @@ case TRACE_CLASS_MUTEX: \
 	case TRACE_CLASS_TASK: \
 		TRACE_SET_TASK_FLAG_ISEXCLUDED(handle); \
 		break; \
+	case TRACE_CLASS_TIMER: \
+		TRACE_SET_TIMER_FLAG_ISEXCLUDED(handle); \
+		break; \
+	case TRACE_CLASS_EVENTGROUP: \
+		TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(handle); \
+		break; \
 	}
 /* Task */
@@ -680,6 +863,20 @@ TRACE_SET_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
 #define vTraceIncludeMutexInTrace(handle) \
 	TRACE_CLEAR_MUTEX_FLAG_ISEXCLUDED(TRACE_GET_OBJECT_NUMBER(UNUSED, handle));
+/* Timer */
+#define vTraceExcludeTimerFromTrace(handle) \
+	TRACE_SET_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));
+#define vTraceIncludeTimerInTrace(handle) \
+	TRACE_CLEAR_TIMER_FLAG_ISEXCLUDED(TRACE_GET_TIMER_NUMBER(handle));
+/* Event Group */
+#define vTraceExcludeEventGroupFromTrace(handle) \
+	TRACE_SET_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));
+#define vTraceIncludeEventGroupInTrace(handle) \
+	TRACE_CLEAR_EVENTGROUP_FLAG_ISEXCLUDED(TRACE_GET_EVENTGROUP_NUMBER(handle));
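/* Usage sketch (hypothetical handles xPollTimer and xWifiEvents): excluding
   a noisy auto-reload timer and an event group from the trace at startup. */
/*
	vTraceExcludeTimerFromTrace(xPollTimer);
	vTraceExcludeEventGroupFromTrace(xWifiEvents);
*/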
 /* Kernel Services */
 #define vTraceExcludeKernelServiceDelayFromTrace() \
@@ -770,6 +967,11 @@ vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
 #define vTraceSetMutexName(object, name) \
 	vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
+#define vTraceSetEventGroupName(object, name) \
+	vTraceSetObjectName(TRACE_CLASS_EVENTGROUP, uxEventGroupGetNumber(object), name);
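/* Usage sketch (hypothetical handle xConnEvents): naming an event group right
   after creation so it shows up with a readable label in the trace. */
/*
	xConnEvents = xEventGroupCreate();
	vTraceSetEventGroupName(xConnEvents, "ConnEvents");
*/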
+#undef traceQUEUE_REGISTRY_ADD
+#define traceQUEUE_REGISTRY_ADD(object, name) vTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(UNUSED, object), TRACE_GET_OBJECT_NUMBER(UNUSED, object), name);
 #endif
 #endif /* TRCKERNELPORT_H_ */