[05/29] drm/i915: Replace last_[rwf]_seqno with last_[rwf]_req

Message ID 1414694481-15724-6-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison Oct. 30, 2014, 6:40 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The object structure contains the last read, write and fenced seqno values for
use in synchronisation operations. These have now been replaced with their
request structure counterparts.
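
Call sites that still need a raw seqno value go through a small accessor. A
sketch of its expected behaviour, assuming the request structure carries a
seqno field as introduced earlier in this series:

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	/* NULL-tolerant: an object with no outstanding request reports 0,
	 * matching the old semantics of a zero seqno. */
	return req ? req->seqno : 0;
}

The NULL tolerance matters because callers such as describe_obj() and
capture_bo() below invoke it on request pointers that may already have been
cleared.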

Note that to ensure that objects do not end up with dangling pointers, the
assignments of last_*_req include reference count updates. Thus a request cannot
be freed if an object is still hanging on to it for any reason.
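
The assign helper implied by that description would look roughly like this (a
sketch; i915_gem_request_reference/unreference are assumed to be the
refcounting helpers from earlier in the series):

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	/* Pin the incoming request first... */
	if (src)
		i915_gem_request_reference(src);

	/* ...then release whatever the slot previously held... */
	if (*pdst)
		i915_gem_request_unreference(*pdst);

	/* ...and store. Assigning NULL is how a slot is cleared. */
	*pdst = src;
}

Taking the new reference before dropping the old one keeps a self-assignment
from releasing the last reference.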

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |    6 +--
 drivers/gpu/drm/i915/i915_drv.h            |    6 +--
 drivers/gpu/drm/i915/i915_gem.c            |   68 ++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |    6 +--
 drivers/gpu/drm/i915/i915_gem_gtt.h        |    4 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c     |    2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |    4 +-
 drivers/gpu/drm/i915/intel_display.c       |   10 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.h    |    2 +-
 9 files changed, 58 insertions(+), 50 deletions(-)

Comments

Thomas Daniel Nov. 11, 2014, 11:39 a.m. UTC | #1
> -----Original Message-----
> From: Intel-gfx [mailto:intel-gfx-bounces@lists.freedesktop.org] On Behalf Of John.C.Harrison@Intel.com
> Sent: Thursday, October 30, 2014 6:41 PM
> To: Intel-GFX@Lists.FreeDesktop.Org
> Subject: [Intel-gfx] [PATCH 05/29] drm/i915: Replace last_[rwf]_seqno with last_[rwf]_req
>
> [snip]

> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index d0562d0..69d9ba1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -182,7 +182,7 @@ struct i915_address_space {
>  	 * List of objects currently involved in rendering.
>  	 *
>  	 * Includes buffers having the contents of their GPU caches
> -	 * flushed, not necessarily primitives.  last_rendering_seqno
> +	 * flushed, not necessarily primitives.  last_rendering_req
last_rendering_req does not exist. Do you mean last_read_req?

>  	 * represents when the rendering involved will be completed.
>  	 *
>  	 * A reference is held on the buffer while on this list.
> @@ -193,7 +193,7 @@ struct i915_address_space {
>  	 * LRU list of objects which are not in the ringbuffer and
>  	 * are ready to unbind, but are still in the GTT.
>  	 *
> -	 * last_rendering_seqno is 0 while an object is in this list.
> +	 * last_rendering_req is NULL while an object is in this list.

Same again.

Thomas.


Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a79f83c..79ce5c0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -133,9 +133,9 @@  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_read_seqno,
-		   obj->last_write_seqno,
-		   obj->last_fenced_seqno,
+		   i915_gem_request_get_seqno(obj->last_read_req),
+		   i915_gem_request_get_seqno(obj->last_write_req),
+		   i915_gem_request_get_seqno(obj->last_fenced_req),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a043b8..059ec43 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1871,10 +1871,10 @@  struct drm_i915_gem_object {
 	struct intel_engine_cs *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_read_seqno;
-	uint32_t last_write_seqno;
+	struct drm_i915_gem_request *last_read_req;
+	struct drm_i915_gem_request *last_write_req;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
-	uint32_t last_fenced_seqno;
+	struct drm_i915_gem_request *last_fenced_req;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d46a70d..a28432c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1289,11 +1289,11 @@  i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 	/* Manually manage the write flush as we may have not yet
 	 * retired the buffer.
 	 *
-	 * Note that the last_write_seqno is always the earlier of
-	 * the two (read/write) seqno, so if we haved successfully waited,
+	 * Note that the last_write_req is always the earlier of
+	 * the two (read/write) requests, so if we haved successfully waited,
 	 * we know we have passed the last write.
 	 */
-	obj->last_write_seqno = 0;
+	i915_gem_request_assign(&obj->last_write_req, NULL);
 
 	return 0;
 }
@@ -1306,14 +1306,18 @@  static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
+	struct drm_i915_gem_request *req;
 	struct intel_engine_cs *ring = obj->ring;
 	u32 seqno;
 	int ret;
 
-	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-	if (seqno == 0)
+	req = readonly ? obj->last_write_req : obj->last_read_req;
+	if (!req)
 		return 0;
 
+	seqno = i915_gem_request_get_seqno(req);
+	WARN_ON(seqno == 0);
+
 	ret = i915_wait_seqno(ring, seqno);
 	if (ret)
 		return ret;
@@ -1329,6 +1333,7 @@  i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 					    struct drm_i915_file_private *file_priv,
 					    bool readonly)
 {
+	struct drm_i915_gem_request *req;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = obj->ring;
@@ -1339,10 +1344,13 @@  i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
-	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-	if (seqno == 0)
+	req = readonly ? obj->last_write_req : obj->last_read_req;
+	if (!req)
 		return 0;
 
+	seqno = i915_gem_request_get_seqno(req);
+	WARN_ON(seqno == 0);
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 	if (ret)
 		return ret;
@@ -2179,12 +2187,12 @@  static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_engine_cs *ring)
 {
-	u32 seqno = intel_ring_get_seqno(ring);
+	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
 
 	BUG_ON(ring == NULL);
-	if (obj->ring != ring && obj->last_write_seqno) {
-		/* Keep the seqno relative to the current ring */
-		obj->last_write_seqno = seqno;
+	if (obj->ring != ring && obj->last_write_req) {
+		/* Keep the request relative to the current ring */
+		i915_gem_request_assign(&obj->last_write_req, req);
 	}
 	obj->ring = ring;
 
@@ -2196,7 +2204,7 @@  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
-	obj->last_read_seqno = seqno;
+	i915_gem_request_assign(&obj->last_read_req, req);
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2227,11 +2235,11 @@  i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
-	obj->last_read_seqno = 0;
-	obj->last_write_seqno = 0;
+	i915_gem_request_assign(&obj->last_read_req, NULL);
+	i915_gem_request_assign(&obj->last_write_req, NULL);
 	obj->base.write_domain = 0;
 
-	obj->last_fenced_seqno = 0;
+	i915_gem_request_assign(&obj->last_fenced_req, NULL);
 
 	obj->active = 0;
 	drm_gem_object_unreference(&obj->base);
@@ -2248,7 +2256,7 @@  i915_gem_object_retire(struct drm_i915_gem_object *obj)
 		return;
 
 	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      obj->last_read_seqno))
+			      i915_gem_request_get_seqno(obj->last_read_req)))
 		i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2663,7 +2671,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+		if (!i915_seqno_passed(seqno, i915_gem_request_get_seqno(obj->last_read_req)))
 			break;
 
 		i915_gem_object_move_to_inactive(obj);
@@ -2773,7 +2781,7 @@  i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+		ret = i915_gem_check_olr(obj->ring, i915_gem_request_get_seqno(obj->last_read_req));
 		if (ret)
 			return ret;
 
@@ -2834,13 +2842,12 @@  i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (ret)
 		goto out;
 
-	if (obj->active) {
-		seqno = obj->last_read_seqno;
-		ring = obj->ring;
-	}
+	if (!obj->active || !obj->last_read_req)
+		goto out;
 
-	if (seqno == 0)
-		 goto out;
+	seqno = i915_gem_request_get_seqno(obj->last_read_req);
+	WARN_ON(seqno == 0);
+	ring = obj->ring;
 
 	/* Do this after OLR check to make sure we make forward progress polling
 	 * on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2891,7 +2898,7 @@  i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	idx = intel_ring_sync_index(from, to);
 
-	seqno = obj->last_read_seqno;
+	seqno = i915_gem_request_get_seqno(obj->last_read_req);
 	/* Optimization: Avoid semaphore sync when we are sure we already
 	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
@@ -2904,11 +2911,12 @@  i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	trace_i915_gem_ring_sync_to(from, to, seqno);
 	ret = to->semaphore.sync_to(to, from, seqno);
 	if (!ret)
-		/* We use last_read_seqno because sync_to()
+		/* We use last_read_req because sync_to()
 		 * might have just caused seqno wrap under
 		 * the radar.
 		 */
-		from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+		from->semaphore.sync_seqno[idx] =
+				i915_gem_request_get_seqno(obj->last_read_req);
 
 	return ret;
 }
@@ -3222,12 +3230,12 @@  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-	if (obj->last_fenced_seqno) {
-		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_seqno(obj->ring, i915_gem_request_get_seqno(obj->last_fenced_req));
 		if (ret)
 			return ret;
 
-		obj->last_fenced_seqno = 0;
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4b7f5c1..7ed09fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,7 +943,7 @@  void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_engine_cs *ring)
 {
-	u32 seqno = intel_ring_get_seqno(ring);
+	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -960,7 +960,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = seqno;
+			i915_gem_request_assign(&obj->last_write_req, req);
 
 			intel_fb_obj_invalidate(obj, ring);
 
@@ -968,7 +968,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			obj->last_fenced_seqno = seqno;
+			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
 				struct drm_i915_private *dev_priv = to_i915(ring->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d0562d0..69d9ba1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -182,7 +182,7 @@  struct i915_address_space {
 	 * List of objects currently involved in rendering.
 	 *
 	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * flushed, not necessarily primitives.  last_rendering_req
 	 * represents when the rendering involved will be completed.
 	 *
 	 * A reference is held on the buffer while on this list.
@@ -193,7 +193,7 @@  struct i915_address_space {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
+	 * last_rendering_req is NULL while an object is in this list.
 	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d1e7a3e..243ba12 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -394,7 +394,7 @@  i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 		if (ret == 0) {
 			obj->fence_dirty =
-				obj->last_fenced_seqno ||
+				obj->last_fenced_req ||
 				obj->fence_reg != I915_FENCE_REG_NONE;
 
 			obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d17360b..10ba188 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -668,8 +668,8 @@  static void capture_bo(struct drm_i915_error_buffer *err,
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->rseqno = obj->last_read_seqno;
-	err->wseqno = obj->last_write_seqno;
+	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 92f0005..51aae75 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9341,16 +9341,16 @@  static int intel_postpone_flip(struct drm_i915_gem_object *obj)
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	if (!obj->last_write_seqno)
+	if (!obj->last_write_req)
 		return 0;
 
 	ring = obj->ring;
 
 	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      obj->last_write_seqno))
+			      i915_gem_request_get_seqno(obj->last_write_req)))
 		return 0;
 
-	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+	ret = i915_gem_check_olr(ring, i915_gem_request_get_seqno(obj->last_write_req));
 	if (ret)
 		return ret;
 
@@ -9412,7 +9412,7 @@  static int intel_queue_mmio_flip(struct drm_device *dev,
 	}
 
 	spin_lock_irq(&dev_priv->mmio_flip_lock);
-	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+	intel_crtc->mmio_flip.seqno = i915_gem_request_get_seqno(obj->last_write_req);
 	intel_crtc->mmio_flip.ring_id = obj->ring->id;
 	spin_unlock_irq(&dev_priv->mmio_flip_lock);
 
@@ -9616,7 +9616,7 @@  static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		if (ret)
 			goto cleanup_unpin;
 
-		work->flip_queued_seqno = obj->last_write_seqno;
+		work->flip_queued_seqno = i915_gem_request_get_seqno(obj->last_write_req);
 		work->flip_queued_ring = obj->ring;
 	} else {
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cc1b62f..d98b964 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -249,7 +249,7 @@  struct  intel_engine_cs {
 	 * ringbuffer.
 	 *
 	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * flushed, not necessarily primitives.  last_rendering_req
 	 * represents when the rendering involved will be completed.
 	 *
 	 * A reference is held on the buffer while on this list.