--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -7,6 +7,7 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_mmu.h"
+#include "msm_gpu_trace.h"

/*
 * Try to transition the preemption state from old to new. Return
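The context comment above introduces the driver's compare-and-swap helper, try_preempt_state(), which succeeds only if no other CPU has moved the state machine in the meantime. Here is a rough user-space sketch of the same pattern using C11 atomics; the enum values and the signature are illustrative, not the driver's exact definitions:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative subset of the preemption state machine. */
    enum preempt_state { PREEMPT_NONE, PREEMPT_START, PREEMPT_TRIGGERED };

    /*
     * Atomically step the state machine from old to new; fail if another
     * CPU changed the state first. The kernel helper does the same thing
     * with atomic_cmpxchg().
     */
    static bool try_preempt_state(_Atomic enum preempt_state *state,
                                  enum preempt_state old, enum preempt_state new)
    {
        enum preempt_state expected = old;

        return atomic_compare_exchange_strong(state, &expected, new);
    }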
@@ -174,6 +175,8 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
	set_preempt_state(a6xx_gpu, PREEMPT_NONE);

+	trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
+
	/*
	 * Retrigger preemption to avoid a deadlock that might occur when preemption
	 * is skipped due to it being already in flight when requested.
	 */
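That comment is the key invariant of this hunk: a trigger that arrives while a switch is already in flight is skipped, so the completion IRQ has to re-run the trigger or the skipped request would never be retried. Continuing the illustrative sketch above (same includes and enum; gpu_t and both function bodies are assumptions, not the driver's code):

    /* Minimal stand-in for the GPU object; illustrative only. */
    typedef struct {
        _Atomic enum preempt_state preempt_state;
    } gpu_t;

    static void preempt_trigger(gpu_t *gpu)
    {
        /* Skip if a switch is already in flight (state is not NONE). */
        if (!try_preempt_state(&gpu->preempt_state, PREEMPT_NONE, PREEMPT_START))
            return;
        /* ... choose the highest-priority ring and start the switch ... */
    }

    static void preempt_irq(gpu_t *gpu)
    {
        /* Switch done: go idle, then retry any request skipped above. */
        atomic_store(&gpu->preempt_state, PREEMPT_NONE);
        preempt_trigger(gpu);
    }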
@@ -295,6 +298,8 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
	 */
	ring->restore_wptr = false;

+	trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id);
+
	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	gpu_write64(gpu,
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume,
TP_printk("%u", __entry->dummy)
);
+TRACE_EVENT(msm_gpu_preemption_trigger,
+		TP_PROTO(int ring_id_from, int ring_id_to),
+		TP_ARGS(ring_id_from, ring_id_to),
+		TP_STRUCT__entry(
+			__field(int, ring_id_from)
+			__field(int, ring_id_to)
+			),
+		TP_fast_assign(
+			__entry->ring_id_from = ring_id_from;
+			__entry->ring_id_to = ring_id_to;
+			),
+		TP_printk("preempting %d -> %d",
+				__entry->ring_id_from,
+				__entry->ring_id_to)
+);
+
+TRACE_EVENT(msm_gpu_preemption_irq,
+		TP_PROTO(u32 ring_id),
+		TP_ARGS(ring_id),
+		TP_STRUCT__entry(
+			__field(u32, ring_id)
+			),
+		TP_fast_assign(
+			__entry->ring_id = ring_id;
+			),
+		TP_printk("preempted to %u", __entry->ring_id)
+);
+
#endif

#undef TRACE_INCLUDE_PATH
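Assuming the header keeps its existing TRACE_SYSTEM (drm_msm_gpu in mainline) and the standard tracefs mount, the two new events can be enabled at runtime by writing 1 to /sys/kernel/tracing/events/drm_msm_gpu/msm_gpu_preemption_trigger/enable and the matching msm_gpu_preemption_irq path; each preemption then logs a "preempting <from> -> <to>" line when triggered and a "preempted to <ring>" line when the switch completes.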