
[1/2] drm/i915: Add tracepoints

Message ID: 1251239598-20408-1-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State: Superseded

Commit Message

Chris Wilson Aug. 25, 2009, 10:33 p.m. UTC
By adding tracepoint equivalents for WATCH_BUF/EXEC, we are able to monitor
the lifetimes of objects, requests and significant events. These events can
then be probed using tracing frameworks such as systemtap and, in
particular, perf.

For example, to record the stack trace for every GPU stall during a run, use

  $ perf record -e i915:i915_gem_request_wait_begin -c 1 -g

And

  $ perf report

to view the results.
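
Since these are standard kernel tracepoints, they can also be read without
perf through the ftrace debugfs interface (a minimal sketch, assuming debugfs
is mounted at /sys/kernel/debug):

  $ echo 1 > /sys/kernel/debug/tracing/events/i915/enable
  $ cat /sys/kernel/debug/tracing/trace_pipe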

CC: Arjan van de Ven <arjan@linux.intel.com>
CC: Ben Gamari <bgamari@gmail.com>

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c   |  119 ++++++++++++++--
 drivers/gpu/drm/i915/i915_irq.c   |    9 +-
 drivers/gpu/drm/i915/i915_trace.h |  281 +++++++++++++++++++++++++++++++++++++
 3 files changed, 394 insertions(+), 15 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_trace.h

Comments

Eric Anholt Aug. 30, 2009, 2:08 a.m. UTC | #1
On Tue, 2009-08-25 at 23:33 +0100, Chris Wilson wrote:
> By adding tracepoint equivalents for WATCH_BUF/EXEC, we are able to monitor
> the lifetimes of objects, requests and significant events. These events can
> then be probed using tracing frameworks such as systemtap and, in
> particular, perf.
> 
> For example, to record the stack trace for every GPU stall during a run, use
> 
>   $ perf record -e i915:i915_gem_request_wait_begin -c 1 -g
> 
> And
> 
>   $ perf report

Fail.  .config attached.

Kernel: arch/x86/boot/bzImage is ready  (#215)
  Building modules, stage 2.
  MODPOST 139 modules
ERROR: "__tracepoint_i915_gem_object_change_domain" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_destroy" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_wait_begin" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_clflush" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_wait_end" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_create" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_retire" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_bind" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_complete" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_unbind" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_object_get_fence" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_submit" [drivers/gpu/drm/i915/i915.ko] undefined!
ERROR: "__tracepoint_i915_gem_request_flush" [drivers/gpu/drm/i915/i915.ko] undefined!
make[1]: *** [__modpost] Error 1
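
The undefined __tracepoint_* symbols are what modpost reports when nothing in
i915.ko actually instantiates the tracepoints: TRACE_EVENT() in a header only
declares them, and the definitions are emitted by the one translation unit
that defines CREATE_TRACE_POINTS before including the trace header. A minimal
sketch of such a unit (the file name and the Makefile line are assumptions,
not part of this patch):

  /* i915_trace_points.c -- hypothetical compilation unit that instantiates
   * the tracepoints declared in i915_trace.h, resolving the __tracepoint_*
   * symbols above.  The driver Makefile would also need something like
   * i915-y += i915_trace_points.o so this object is linked into i915.ko.
   */
  #include "i915_drv.h"

  #define CREATE_TRACE_POINTS
  #include "i915_trace.h"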

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 140bee1..437f366 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@ 
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include <linux/swap.h>
 #include <linux/pci.h>
 
@@ -1562,8 +1563,14 @@  i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 
 			if ((obj->write_domain & flush_domains) ==
 			    obj->write_domain) {
+				uint32_t old_write_domain = obj->write_domain;
+
 				obj->write_domain = 0;
 				i915_gem_object_move_to_active(obj, seqno);
+
+				trace_i915_gem_object_change_domain(obj,
+								    obj->read_domains,
+								    old_write_domain);
 			}
 		}
 
@@ -1608,6 +1615,8 @@  i915_gem_retire_request(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	trace_i915_gem_request_retire(dev, request->seqno);
+
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
@@ -1748,6 +1757,8 @@  i915_wait_request(struct drm_device *dev, uint32_t seqno)
 			i915_driver_irq_postinstall(dev);
 		}
 
+		trace_i915_gem_request_wait_begin(dev, seqno);
+
 		dev_priv->mm.waiting_gem_seqno = seqno;
 		i915_user_irq_get(dev);
 		ret = wait_event_interruptible(dev_priv->irq_queue,
@@ -1756,6 +1767,8 @@  i915_wait_request(struct drm_device *dev, uint32_t seqno)
 					       dev_priv->mm.wedged);
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
+
+		trace_i915_gem_request_wait_end(dev, seqno);
 	}
 	if (dev_priv->mm.wedged)
 		ret = -EIO;
@@ -1788,6 +1801,8 @@  i915_gem_flush(struct drm_device *dev,
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
+	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+				     invalidate_domains, flush_domains);
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
@@ -1941,6 +1956,8 @@  i915_gem_object_unbind(struct drm_gem_object *obj)
 	if (!list_empty(&obj_priv->list))
 		list_del_init(&obj_priv->list);
 
+	trace_i915_gem_object_unbind(obj);
+
 	return 0;
 }
 
@@ -2309,6 +2326,8 @@  try_again:
 	else
 		i830_write_fence_reg(reg);
 
+	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
 	return 0;
 }
 
@@ -2476,6 +2495,8 @@  i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
 	return 0;
 }
 
@@ -2491,6 +2512,8 @@  i915_gem_clflush_object(struct drm_gem_object *obj)
 	if (obj_priv->pages == NULL)
 		return;
 
+	trace_i915_gem_object_clflush(obj);
+
 	/* XXX: The 865 in particular appears to be weird in how it handles
 	 * cache flushing.  We haven't figured it out, but the
 	 * clflush+agp_chipset_flush doesn't appear to successfully get the
@@ -2510,21 +2533,29 @@  i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t seqno;
+	uint32_t old_write_domain;
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
 
 	/* Queue the GPU write cache flushing we need. */
+	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
 	obj->write_domain = 0;
 	i915_gem_object_move_to_active(obj, seqno);
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 {
+	uint32_t old_write_domain;
+
 	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
@@ -2532,7 +2563,12 @@  i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
 	 */
+	old_write_domain = obj->write_domain;
 	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
@@ -2540,13 +2576,19 @@  static void
 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	uint32_t old_write_domain;
 
 	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	i915_gem_clflush_object(obj);
 	drm_agp_chipset_flush(dev);
+	old_write_domain = obj->write_domain;
 	obj->write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->read_domains,
+					    old_write_domain);
 }
 
 /**
@@ -2559,6 +2601,7 @@  int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
@@ -2571,6 +2614,9 @@  i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (ret != 0)
 		return ret;
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
 	/* If we're writing through the GTT domain, then CPU and GPU caches
 	 * will need to be invalidated at next use.
 	 */
@@ -2589,6 +2635,10 @@  i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 		obj_priv->dirty = 1;
 	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
 	return 0;
 }
 
@@ -2601,6 +2651,7 @@  i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 static int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
+	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	i915_gem_object_flush_gpu_write_domain(obj);
@@ -2616,6 +2667,9 @@  i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+	old_write_domain = obj->write_domain;
+	old_read_domains = obj->read_domains;
+
 	/* Flush the CPU cache if it's still invalid. */
 	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
@@ -2636,6 +2690,10 @@  i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
 	return 0;
 }
 
@@ -2757,6 +2815,7 @@  i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
 	uint32_t			invalidate_domains = 0;
 	uint32_t			flush_domains = 0;
+	uint32_t			old_read_domains;
 
 	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
 	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2801,6 +2860,8 @@  i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		i915_gem_clflush_object(obj);
 	}
 
+	old_read_domains = obj->read_domains;
+
 	/* The actual obj->write_domain will be updated with
 	 * pending_write_domain after we emit the accumulated flush for all
 	 * of our domain changes in execbuffers (which clears objects'
@@ -2819,6 +2880,10 @@  i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 		 obj->read_domains, obj->write_domain,
 		 dev->invalidate_domains, dev->flush_domains);
 #endif
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
 }
 
 /**
@@ -2871,6 +2936,7 @@  i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	uint32_t old_read_domains;
 	int i, ret;
 
 	if (offset == 0 && size == obj->size)
@@ -2917,8 +2983,13 @@  i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	 */
 	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
+	old_read_domains = obj->read_domains;
 	obj->read_domains |= I915_GEM_DOMAIN_CPU;
 
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    obj->write_domain);
+
 	return 0;
 }
 
@@ -3104,6 +3175,8 @@  i915_dispatch_gem_execbuffer(struct drm_device *dev,
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
+	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
 	count = nbox ? nbox : 1;
 
 	for (i = 0; i < count; i++) {
@@ -3467,8 +3540,12 @@  i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3807,6 +3884,8 @@  int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
 
+	trace_i915_gem_object_create(obj);
+
 	return 0;
 }
 
@@ -3815,6 +3894,8 @@  void i915_gem_free_object(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+	trace_i915_gem_object_destroy(obj);
+
 	while (obj_priv->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
@@ -3941,24 +4022,36 @@  i915_gem_idle(struct drm_device *dev)
 	 * the GPU domains and just stuff them onto inactive.
 	 */
 	while (!list_empty(&dev_priv->mm.active_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		obj = list_first_entry(&dev_priv->mm.active_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-					    struct drm_i915_gem_object,
-					    list);
-		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj_priv->obj);
+		struct drm_gem_object *obj;
+		uint32_t old_write_domain;
+
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
+				       struct drm_i915_gem_object,
+				       list)->obj;
+		old_write_domain = obj->write_domain;
+		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+		i915_gem_object_move_to_inactive(obj);
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->read_domains,
+						    old_write_domain);
 	}
 
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7ebc84c..3baf252 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@ 
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_trace.h"
 #include "intel_drv.h"
 
 #define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@  irqreturn_t igdng_irq_handler(struct drm_device *dev)
 		}
 
 		if (gt_iir & GT_USER_INTERRUPT) {
-			dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+			u32 seqno = i915_get_gem_seqno(dev);
+			dev_priv->mm.irq_gem_seqno = seqno;
+			trace_i915_gem_request_complete(dev, seqno);
 			DRM_WAKEUP(&dev_priv->irq_queue);
 		}
 
@@ -578,7 +581,9 @@  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT) {
-			dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+			u32 seqno = i915_get_gem_seqno(dev);
+			dev_priv->mm.irq_gem_seqno = seqno;
+			trace_i915_gem_request_complete(dev, seqno);
 			DRM_WAKEUP(&dev_priv->irq_queue);
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 0000000..672973e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,281 @@ 
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->size = obj->size;
+			   ),
+
+	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+	    TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+	    TP_ARGS(obj, gtt_offset),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, gtt_offset)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->gtt_offset = gtt_offset;
+			   ),
+
+	    TP_printk("obj=%p, gtt_offset=%08x",
+		      __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+
+	    TP_ARGS(obj, old_read_domains, old_write_domain),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(u32, read_domains)
+			     __field(u32, write_domain)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->read_domains = obj->read_domains | (old_read_domains << 16);
+			   __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+			   ),
+
+	    TP_printk("obj=%p, read=%04x, write=%04x",
+		      __entry->obj,
+		      __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+	    TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+	    TP_ARGS(obj, fence, tiling_mode),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     __field(int, fence)
+			     __field(int, tiling_mode)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->fence = fence;
+			   __entry->tiling_mode = tiling_mode;
+			   ),
+
+	    TP_printk("obj=%p, fence=%d, tiling=%d",
+		      __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+	    TP_PROTO(struct drm_gem_object *obj),
+
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_flush,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno,
+		     u32 flush_domains, u32 invalidate_domains),
+
+	    TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     __field(u32, flush_domains)
+			     __field(u32, invalidate_domains)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   __entry->flush_domains = flush_domains;
+			   __entry->invalidate_domains = invalidate_domains;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+		      __entry->dev, __entry->seqno,
+		      __entry->flush_domains, __entry->invalidate_domains)
+);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_device *, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>