Message ID | 20180620204841.56354-11-seanpaul@chromium.org (mailing list archive) |
---|---|
State | Not Applicable, archived |
On 2018-06-21 02:18, Sean Paul wrote:
> This patch converts all DPU_EVTs in dpu_core_irq to either a DRM_* log
> message or a Linux tracepoint.
>
> Signed-off-by: Sean Paul <seanpaul@chromium.org>

Reviewed-by: Rajesh Yadav <ryadav@codeaurora.org>

> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 52 ++++++++------------
>  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h    | 50 +++++++++++++++++++
>  2 files changed, 71 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 33ab2ac46833..530c24dec017 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -18,6 +18,7 @@
 #include <linux/kthread.h>
 
 #include "dpu_core_irq.h"
+#include "dpu_trace.h"
 
 /**
  * dpu_core_irq_callback_handler - dispatch core interrupts
@@ -34,10 +35,8 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
 	pr_debug("irq_idx=%d\n", irq_idx);
 
 	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-		DPU_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
-		DPU_EVT32_IRQ(irq_idx, atomic_read(
-				&dpu_kms->irq_obj.enable_counts[irq_idx]),
-				DPU_EVTLOG_ERROR);
+		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
 	}
 
 	atomic_inc(&irq_obj->irq_counts[irq_idx]);
@@ -80,7 +79,7 @@ int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
 static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
 {
 	unsigned long irq_flags;
-	int ret = 0;
+	int ret = 0, enable_count;
 
 	if (!dpu_kms || !dpu_kms->hw_intr ||
 			!dpu_kms->irq_obj.enable_counts ||
@@ -94,11 +93,10 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
 		return -EINVAL;
 	}
 
-	DPU_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
 
-	DPU_EVT32(irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
 	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
 		ret = dpu_kms->hw_intr->ops.enable_irq(
 				dpu_kms->hw_intr,
@@ -130,11 +128,8 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
 	}
 
 	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-	if (counts) {
-		DPU_ERROR("%pS: irq_idx=%d enable_count=%d\n",
-			__builtin_return_address(0), irq_idxs[0], counts);
-		DPU_EVT32(irq_idxs[0], counts, DPU_EVTLOG_ERROR);
-	}
+	if (counts)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
 
 	for (i = 0; (i < irq_count) && !ret; i++)
 		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
@@ -149,7 +144,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
  */
 static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
 {
-	int ret = 0;
+	int ret = 0, enable_count;
 
 	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
 		DPU_ERROR("invalid params\n");
@@ -161,11 +156,10 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
 		return -EINVAL;
 	}
 
-	DPU_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
 
-	DPU_EVT32(irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
 	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
 		ret = dpu_kms->hw_intr->ops.disable_irq(
 				dpu_kms->hw_intr,
@@ -189,11 +183,8 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
 	}
 
 	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
-	if (counts == 2) {
-		DPU_ERROR("%pS: irq_idx=%d enable_count=%d\n",
-			__builtin_return_address(0), irq_idxs[0], counts);
-		DPU_EVT32(irq_idxs[0], counts, DPU_EVTLOG_ERROR);
-	}
+	if (counts == 2)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
 
 	for (i = 0; (i < irq_count) && !ret; i++)
 		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
@@ -209,7 +200,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
  */
 int dpu_core_irq_disable_nolock(struct dpu_kms *dpu_kms, int irq_idx)
 {
-	int ret = 0;
+	int ret = 0, enable_count;
 
 	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
 		DPU_ERROR("invalid params\n");
@@ -221,11 +212,10 @@ int dpu_core_irq_disable_nolock(struct dpu_kms *dpu_kms, int irq_idx)
 		return -EINVAL;
 	}
 
-	DPU_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_nolock(irq_idx, enable_count);
 
-	DPU_EVT32(irq_idx,
-			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
 	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
 		ret = dpu_kms->hw_intr->ops.disable_irq_nolock(
 				dpu_kms->hw_intr,
@@ -297,7 +287,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
 
 	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
-	DPU_EVT32(irq_idx, register_irq_cb);
+	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
 	list_del_init(&register_irq_cb->list);
 	list_add_tail(&register_irq_cb->list,
 			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
@@ -332,7 +322,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
 	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
 
 	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
-	DPU_EVT32(irq_idx, register_irq_cb);
+	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
 	list_del_init(&register_irq_cb->list);
 	/* empty callback list but interrupt is still enabled */
 	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 9d044f5ce26e..ee41db86a2e9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -966,6 +966,56 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
 	TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
 );
 
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count),
+	TP_STRUCT__entry(
+		__field(	int,	irq_idx		)
+		__field(	int,	enable_count	)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->enable_count = enable_count;
+	),
+	TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+		  __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_nolock,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback),
+	TP_STRUCT__entry(
+		__field(	int,				irq_idx	)
+		__field(	struct dpu_irq_callback *,	callback)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->callback = callback;
+	),
+	TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+		  __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+
 #define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
 #define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
 #define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
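For context, the dpu_trace.h hunk above relies on the standard kernel tracepoint machinery. Below is a minimal sketch of how such a trace header is wired up, using a hypothetical "foo" system rather than the real dpu definitions; the TRACE_SYSTEM and define_trace.h boilerplate shown here is assumed to already exist in dpu_trace.h and is simply outside the hunk.

/*
 * Hypothetical "foo" trace header sketching the pattern dpu_trace.h uses.
 * The event class fixes the record layout and format string once; each
 * DEFINE_EVENT stamps out a named event (and a trace_<name>() helper)
 * that reuses it.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_FOO_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _FOO_TRACE_H_

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(foo_idx_cnt_template,
	TP_PROTO(int idx, int cnt),
	TP_ARGS(idx, cnt),
	TP_STRUCT__entry(
		__field(int, idx)
		__field(int, cnt)
	),
	TP_fast_assign(
		__entry->idx = idx;
		__entry->cnt = cnt;
	),
	TP_printk("idx:%d cnt:%d", __entry->idx, __entry->cnt)
);

DEFINE_EVENT(foo_idx_cnt_template, foo_enable_idx,
	TP_PROTO(int idx, int cnt),
	TP_ARGS(idx, cnt)
);

#endif /* _FOO_TRACE_H_ */

/* This part must stay outside the include guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE foo_trace
#include <trace/define_trace.h>

Exactly one compilation unit defines CREATE_TRACE_POINTS before including such a header so the event bodies are emitted once; every other caller just includes it and calls the generated trace_<event>() helpers, which is what the dpu_core_irq.c hunks do with trace_dpu_core_irq_enable_idx() and friends.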
This patch converts all DPU_EVTs in dpu_core_irq to either a DRM_* log
message or a Linux tracepoint.

Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 52 ++++++++------------
 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h    | 50 +++++++++++++++++++
 2 files changed, 71 insertions(+), 31 deletions(-)
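Once the series lands, the new events would typically be consumed through tracefs. Here is a minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing and that the events appear under a "dpu" group (the TRACE_SYSTEM used by dpu_trace.h); the same thing can be done from a shell by echoing 1 into the enable file and reading trace_pipe.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Enable the per-IRQ enable-count event added by this patch. */
	if (write_str("/sys/kernel/tracing/events/dpu/dpu_core_irq_enable_idx/enable",
		      "1"))
		return 1;

	/* trace_pipe blocks until events arrive and consumes them as read. */
	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}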