@@ -113,7 +113,7 @@ static inline u32 intel_uncore_read_fw(struct fake_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32_notrace(__fake_uncore_to_gt(uncore), reg);
}
static inline void intel_uncore_write_fw(struct fake_uncore *uncore,
@@ -121,7 +121,7 @@ static inline void intel_uncore_write_fw(struct fake_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__fake_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32_notrace(__fake_uncore_to_gt(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct fake_uncore *uncore,
@@ -129,7 +129,7 @@ static inline u32 intel_uncore_read_notrace(struct fake_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32_notrace(__fake_uncore_to_gt(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct fake_uncore *uncore,
@@ -137,7 +137,7 @@ static inline void intel_uncore_write_notrace(struct fake_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__fake_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32_notrace(__fake_uncore_to_gt(uncore), reg, val);
}
#endif /* __INTEL_UNCORE_H__ */
@@ -19,6 +19,7 @@
#include "xe_gt_mcr.h"
#include "xe_macros.h"
#include "xe_module.h"
+#include "xe_trace.h"
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
@@ -435,7 +436,7 @@ static const struct xe_reg mmio_read_whitelist[] = {
RING_TIMESTAMP(RENDER_RING_BASE),
};
-inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+inline u8 xe_mmio_read8_notrace(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -445,6 +446,32 @@ inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
return readb(tile->mmio.regs + reg.addr);
}
+inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ u8 val;
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ val = readb(tile->mmio.regs + reg.addr);
+
+	trace_xe_reg_rw(false, &reg, val, sizeof(val));
+
+ return val;
+}
+
+inline void xe_mmio_write32_notrace(struct xe_gt *gt,
+ struct xe_reg reg, u32 val)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ writel(val, tile->mmio.regs + reg.addr);
+}
+
inline void xe_mmio_write32(struct xe_gt *gt,
struct xe_reg reg, u32 val)
{
@@ -453,10 +480,12 @@ inline void xe_mmio_write32(struct xe_gt *gt,
if (reg.addr < gt->mmio.adj_limit)
reg.addr += gt->mmio.adj_offset;
+	trace_xe_reg_rw(true, &reg, val, sizeof(val));
+
writel(val, tile->mmio.regs + reg.addr);
}
-inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+inline u32 xe_mmio_read32_notrace(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -466,6 +495,21 @@ inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
return readl(tile->mmio.regs + reg.addr);
}
+inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ u32 val;
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ val = readl(tile->mmio.regs + reg.addr);
+
+	trace_xe_reg_rw(false, &reg, val, sizeof(val));
+
+ return val;
+}
+
inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
u32 set)
{
@@ -478,6 +522,17 @@ inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
return old;
}
+inline void xe_mmio_write64_notrace(struct xe_gt *gt,
+ struct xe_reg reg, u64 val)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ writeq(val, tile->mmio.regs + reg.addr);
+}
+
inline void xe_mmio_write64(struct xe_gt *gt,
struct xe_reg reg, u64 val)
{
@@ -486,10 +541,12 @@ inline void xe_mmio_write64(struct xe_gt *gt,
if (reg.addr < gt->mmio.adj_limit)
reg.addr += gt->mmio.adj_offset;
+	trace_xe_reg_rw(true, &reg, val, sizeof(val));
+
writeq(val, tile->mmio.regs + reg.addr);
}
-inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
+inline u64 xe_mmio_read64_notrace(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -499,6 +556,21 @@ inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
return readq(tile->mmio.regs + reg.addr);
}
+inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ u64 val;
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ val = readq(tile->mmio.regs + reg.addr);
+
+	trace_xe_reg_rw(false, &reg, val, sizeof(val));
+
+ return val;
+}
+
inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
struct xe_reg reg, u32 val,
u32 mask, u32 eval)
@@ -522,7 +594,7 @@ inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
u32 read;
for (;;) {
- read = xe_mmio_read32(gt, reg);
+ read = xe_mmio_read32_notrace(gt, reg);
if ((read & mask) == val) {
ret = 0;
break;
@@ -542,6 +614,8 @@ inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
wait <<= 1;
}
+	trace_xe_reg_rw(false, &reg, read, sizeof(read));
+
if (out_val)
*out_val = read;
@@ -20,14 +20,21 @@ struct xe_device;
#define GEN12_LMEM_BAR 2
int xe_mmio_init(struct xe_device *xe);
+inline u8 xe_mmio_read8_notrace(struct xe_gt *gt, struct xe_reg reg);
inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
+inline void xe_mmio_write32_notrace(struct xe_gt *gt,
+ struct xe_reg reg, u32 val);
inline void xe_mmio_write32(struct xe_gt *gt,
struct xe_reg reg, u32 val);
+inline u32 xe_mmio_read32_notrace(struct xe_gt *gt, struct xe_reg reg);
inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
u32 set);
+inline void xe_mmio_write64_notrace(struct xe_gt *gt,
+ struct xe_reg reg, u64 val);
inline void xe_mmio_write64(struct xe_gt *gt,
struct xe_reg reg, u64 val);
+inline u64 xe_mmio_read64_notrace(struct xe_gt *gt, struct xe_reg reg);
inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg);
inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
struct xe_reg reg, u32 val,
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "regs/xe_reg_defs.h"
#include "xe_bo_types.h"
#include "xe_engine_types.h"
#include "xe_gt_tlb_invalidation_types.h"
@@ -507,6 +508,31 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
TP_ARGS(vm)
);
+TRACE_EVENT(xe_reg_rw,
+	    TP_PROTO(bool write, struct xe_reg *reg, u64 val, int len),
+	    TP_ARGS(write, reg, val, len),
+
+	    TP_STRUCT__entry(
+		     __field(u64, val)
+		     __field(u32, addr)
+		     __field(u16, write)
+		     __field(u16, len)
+		     ),
+
+	    TP_fast_assign(
+		   __entry->val = (u64)val;
+		   __entry->addr = reg->addr; /* store the offset, not a pointer that goes stale before the trace is read */
+		   __entry->write = write;
+		   __entry->len = len;
+		   ),
+
+	    TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		      __entry->write ? "write" : "read",
+		      __entry->addr, __entry->len,
+		      (u32)(__entry->val & 0xffffffff),
+		      (u32)(__entry->val >> 32))
+);
+
TRACE_EVENT(xe_guc_ct_h2g_flow_control,
TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
TP_ARGS(_head, _tail, size, space, len),
This will help debug register reads/writes and provide a way to trace all the mmio transactions. In order to avoid spam from xe_mmio_wait32, xe_mmio_read32_notrace is introduced and used. Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com> --- .../drm/xe/compat-i915-headers/intel_uncore.h | 8 +- drivers/gpu/drm/xe/xe_mmio.c | 82 ++++++++++++++++++- drivers/gpu/drm/xe/xe_mmio.h | 7 ++ drivers/gpu/drm/xe/xe_trace.h | 26 ++++++ 4 files changed, 115 insertions(+), 8 deletions(-)