[3/6] drm/i915: Remove the per-ring write list

Message ID 1342106019-17806-4-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson July 12, 2012, 3:13 p.m. UTC
This is now handled by a global flag to ensure we emit a flush before
the next serialisation point (if we failed to queue one previously).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |    2 -
 drivers/gpu/drm/i915/i915_gem.c            |   55 ++--------------------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   10 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    |    2 -
 drivers/gpu/drm/i915/intel_ringbuffer.h    |    9 -----
 5 files changed, 7 insertions(+), 71 deletions(-)
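
The scheme replacing the write list can be sketched as follows. This is
an illustrative sketch only: the gpu_caches_dirty field and the example_*
helpers are assumptions for exposition, not code taken from the series.

/* Sketch of the global-flag scheme (names assumed, not from the patch). */

/* When a batch may have dirtied the GPU caches, record a single flag ... */
static void example_after_batch(struct intel_ring_buffer *ring)
{
	/* ... instead of chaining each written object onto a per-ring list */
	ring->gpu_caches_dirty = true;
}

/* ... and emit one flush for the whole ring before the next serialisation
 * point, e.g. before a request's breadcrumb seqno is written.
 */
static int example_before_request(struct intel_ring_buffer *ring)
{
	int ret;

	if (ring->gpu_caches_dirty) {
		ret = i915_gem_flush_ring(ring,
					  I915_GEM_GPU_DOMAINS,
					  I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
	}

	return 0;
}

Because the flush is queued ahead of the breadcrumb, any waiter that has
seen the request's seqno has also waited for the caches to be flushed,
which is what makes the per-object write list redundant.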

Comments

Daniel Vetter July 12, 2012, 7:37 p.m. UTC | #1
On Thu, Jul 12, 2012 at 04:13:36PM +0100, Chris Wilson wrote:
> This is now handled by a global flag to ensure we emit a flush before
> the next serialisation point (if we failed to queue one previously).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h            |    2 -
>  drivers/gpu/drm/i915/i915_gem.c            |   55 ++--------------------------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   10 ++---
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |    2 -
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |    9 -----
>  5 files changed, 7 insertions(+), 71 deletions(-)
> 
> [...]
> 
> @@ -1677,7 +1650,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
>  				       ring_list);
>  
>  		obj->base.write_domain = 0;
> -		list_del_init(&obj->gpu_write_list);
> +		obj->pending_gpu_write = false;
>  		i915_gem_object_move_to_inactive(obj);

Hm, this hunk makes me wonder whether we don't leak some bogus state
across a gpu reset. Can't we just reuse the move_to_inactive helper here,
ensuring that we consistently clear up any active state?

Otherwise I couldn't poke any other holes in this patch series, but let
me sleep on it a bit first ...
-Daniel
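
A sketch of the consolidation suggested above, assuming the
move_to_inactive helper is extended to clear all of the object's active
state itself (hypothetical; the thread does not settle the exact split):

/* Hypothetical reset path: let the helper own the cleanup, so the reset
 * path cannot drift out of sync with normal request retirement.
 */
static void example_reset_ring_lists(struct drm_i915_private *dev_priv,
				     struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		/* assumed to clear write_domain, pending_gpu_write and
		 * the ring/mm list linkage in one place
		 */
		i915_gem_object_move_to_inactive(obj);
	}
}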

Chris Wilson July 12, 2012, 8:07 p.m. UTC | #2
On Thu, 12 Jul 2012 21:37:16 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Thu, Jul 12, 2012 at 04:13:36PM +0100, Chris Wilson wrote:
> >  		obj->base.write_domain = 0;
> > -		list_del_init(&obj->gpu_write_list);
> > +		obj->pending_gpu_write = false;
> >  		i915_gem_object_move_to_inactive(obj);
> 
> Hm, this hunk makes me wonder whether we don't leak some bogus state
> across a gpu reset. Can't we just reuse the move_to_inactive helper here,
> ensuring that we consistently clear up any active state?

Yes. I had planned to finish off with another patch to remove that pair
of then-redundant lines, but apparently forgot. I think it is better
expressed as a second patch, at least from the point of view of how I
was applying the transformations.
-Chris
Daniel Vetter July 13, 2012, 8:34 a.m. UTC | #3
On Thu, Jul 12, 2012 at 09:07:52PM +0100, Chris Wilson wrote:
> On Thu, 12 Jul 2012 21:37:16 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> > On Thu, Jul 12, 2012 at 04:13:36PM +0100, Chris Wilson wrote:
> > >  		obj->base.write_domain = 0;
> > > -		list_del_init(&obj->gpu_write_list);
> > > +		obj->pending_gpu_write = false;
> > >  		i915_gem_object_move_to_inactive(obj);
> > 
> > Hm, this hunk makes me wonder whether we don't leak some bogus state
> > across a gpu reset. Can't we just reuse the move_to_inactive helper here,
> > ensuring that we consistently clear up any active state?
> 
> Yes. I had planned to finish off with another patch to remove that pair
> of then-redundant lines, but apparently forgot. I think it is better
> expressed as a second patch, at least from the point of view of how I
> was applying the transformations.

Actually I was confused yesterday; we do call move_to_inactive on the
next line ;-)

I've looked again at your patch, and the changes to how we handle
obj->pending_gpu_write look buggy. The above hunk is superfluous, because
move_to_inactive will do that.

The hunk in i915_gem_execbuffer.c is buggy: we can't clear
pending_gpu_write there, since we use that to decide whether we need to
stall for the gpu when the cpu does a read-only access. We then stall
completely because we don't keep track of the last write seqno separately.
-Daniel
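
The consumer being referred to looks roughly like this: a sketch of the
read-only access path, with the function shape assumed rather than quoted
from the tree. Only the pending_gpu_write test matters here.

/* Sketch: why clearing pending_gpu_write in execbuffer is dangerous.
 * The read-only path relies on the flag to decide whether any wait is
 * needed at all.
 */
int example_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	if (obj->pending_gpu_write || write) {
		/* Stall only if the GPU may still be writing; a stale
		 * 'false' here would let the CPU read unflushed data.
		 */
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}

	/* ... domain bookkeeping elided ... */
	return 0;
}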
Chris Wilson July 13, 2012, 8:49 a.m. UTC | #4
On Fri, 13 Jul 2012 10:34:43 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Thu, Jul 12, 2012 at 09:07:52PM +0100, Chris Wilson wrote:
> > On Thu, 12 Jul 2012 21:37:16 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> > > On Thu, Jul 12, 2012 at 04:13:36PM +0100, Chris Wilson wrote:
> > > >  		obj->base.write_domain = 0;
> > > > -		list_del_init(&obj->gpu_write_list);
> > > > +		obj->pending_gpu_write = false;
> > > >  		i915_gem_object_move_to_inactive(obj);
> > > 
> > > Hm, this hunk makes me wonder whether we don't leak some bogus state
> > > across a gpu reset. Can't we just reuse the move_to_inactive helper here,
> > > ensuring that we consistently clear up any active state?
> > 
> > Yes. I had planned to finish off with another patch to remove that pair
> > of then-redundant lines, but apparently forgot. I think it is better
> > expressed as a second patch, at least from the point of view of how I
> > was applying the transformations.
> 
> Actually I was confused yesterday; we do call move_to_inactive on the
> next line ;-)
> 
> I've looked again at your patch, and the changes to how we handle
> obj->pending_gpu_write look buggy. The above hunk is superfluous, because
> move_to_inactive will do that.

Right, but the transformation is a two-stage process. It was more
obvious to do the replacement of gpu_write_list with pending_gpu_write
first and then remove the duplicate lines later.

> The hunk in i915_gem_execbuffer.c is buggy: we can't clear
> pending_gpu_write there, since we use that to decide whether we need to
> stall for the gpu when the cpu does a read-only access. We then stall
> completely because we don't keep track of the last write seqno separately.

No, because by the time the previous breadcrumb has been seen we are
guaranteed to have flushed the gpu caches. So for any wait in the future,
we will have flushed the caches before returning.

The next patch will be to remove the obsolete pending_gpu_write.
Hopefully that will make you feel calmer.
-Chris
Chris Wilson July 13, 2012, 8:53 a.m. UTC | #5
On Fri, 13 Jul 2012 09:49:48 +0100, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> No, because by the time the previous breadcrumb has been seen we are
> guaranteed to have flushed the gpu caches. So for any wait in the future,
> we will have flushed the caches before returning.

Egg on face time.

The issue is on the waiter side, since we don't wait unless
pending_gpu_write is set. Tracking the last write seqno seems safest
when removing the gpu_write_list.
-Chris
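
What tracking the last write seqno could look like, as a hypothetical
sketch: the last_write_seqno field is an assumption here, while
last_rendering_seqno and i915_wait_seqno are taken from the patch context.

/* Hypothetical: remember the seqno of the last request that wrote the
 * object, so a read-only wait targets just the writer instead of
 * depending on the pending_gpu_write flag.
 */
static int example_wait_rendering(struct drm_i915_gem_object *obj,
				  bool readonly)
{
	/* last_write_seqno would be set in execbuffer's move_to_active
	 * whenever obj->base.write_domain is non-zero, and cleared when
	 * that request retires.
	 */
	u32 seqno = readonly ? obj->last_write_seqno
			     : obj->last_rendering_seqno;

	if (seqno == 0)
		return 0;	/* nothing outstanding to wait for */

	return i915_wait_seqno(obj->ring, seqno);
}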
Daniel Vetter July 13, 2012, 9:05 a.m. UTC | #6
On Fri, Jul 13, 2012 at 09:53:33AM +0100, Chris Wilson wrote:
> On Fri, 13 Jul 2012 09:49:48 +0100, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > No, because by the time the previous breadcrumb has been seen we are
> > guaranteed to have flushed the gpu caches. So for any wait in the future,
> > we will have flushed the caches before returning.
> 
> Egg on face time.
> 
> The issue is on the waiter side, since we don't wait unless
> pending_gpu_write is set. Tracking the last write seqno seems safest
> when removing the gpu_write_list.

Imo tracking the last write seqno would be an extension (i.e. a separate
patch); the current code that just tracks pending_gpu_write should work
(and if I haven't botched the i-g-t tests, they should be able to catch
that kind of stuff).
-Daniel

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 38b95be..7871760 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -865,8 +865,6 @@  struct drm_i915_gem_object {
 	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f69d897..535dd61 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,7 +1465,6 @@  i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	BUG_ON(!list_empty(&obj->gpu_write_list));
 	BUG_ON(!obj->active);
 
 	list_del_init(&obj->ring_list);
@@ -1510,30 +1509,6 @@  i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-			       uint32_t flush_domains)
-{
-	struct drm_i915_gem_object *obj, *next;
-
-	list_for_each_entry_safe(obj, next,
-				 &ring->gpu_write_list,
-				 gpu_write_list) {
-		if (obj->base.write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->base.write_domain;
-
-			obj->base.write_domain = 0;
-			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
-
-			trace_i915_gem_object_change_domain(obj,
-							    obj->base.read_domains,
-							    old_write_domain);
-		}
-	}
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1633,8 +1608,6 @@  i915_add_request(struct intel_ring_buffer *ring,
 					   &dev_priv->mm.retire_work, HZ);
 	}
 
-	WARN_ON(!list_empty(&ring->gpu_write_list));
-
 	return 0;
 }
 
@@ -1677,7 +1650,7 @@  static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				       ring_list);
 
 		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
+		obj->pending_gpu_write = false;
 		i915_gem_object_move_to_inactive(obj);
 	}
 }
@@ -2005,11 +1978,6 @@  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
 	 */
@@ -2017,6 +1985,7 @@  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
+
 		i915_gem_retire_requests_ring(obj->ring);
 	}
 
@@ -2288,26 +2257,14 @@  i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
-
 	return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-	int ret;
-
-	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+	if (list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-	}
-
 	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2323,10 +2280,6 @@  int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		/* Is the device fubar? */
-		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-			return -EBUSY;
-
 		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
 		if (ret)
 			return ret;
@@ -3471,7 +3424,6 @@  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	INIT_LIST_HEAD(&obj->gtt_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
 	/* Avoid an unnecessary call to unbind on the first bind. */
 	obj->map_and_fenceable = true;
@@ -3892,7 +3844,6 @@  init_ring_lists(struct intel_ring_buffer *ring)
 {
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0c26692..ca48978 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,9 +938,8 @@  i915_gem_execbuffer_move_to_active(struct list_head *objects,
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		  u32 old_read = obj->base.read_domains;
-		  u32 old_write = obj->base.write_domain;
-
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
 
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -950,11 +949,10 @@  i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->pending_gpu_write = true;
-			list_move_tail(&obj->gpu_write_list,
-				       &ring->gpu_write_list);
 			if (obj->pin_count) /* check for potential scanout */
 				intel_mark_busy(ring->dev, obj);
-		}
+		} else
+			obj->pending_gpu_write = false;
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ddc4859..7c166ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1001,7 +1001,6 @@  static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1472,7 +1471,6 @@  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	ring->size = size;
 	ring->effective_size = ring->size;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81f..7986f30 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -101,15 +101,6 @@  struct  intel_ring_buffer {
 	struct list_head request_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;