[2/2] drm/i915: Recover all available ringbuffer space following reset

Message ID 87612ua7n3.fsf@gaia.fi.intel.com (mailing list archive)
State New, archived

Commit Message

Mika Kuoppala Sept. 28, 2015, 3:25 p.m. UTC
Hi,

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Having flushed all requests from all queues, we know that all
> ringbuffers must now be empty. However, since we do not reclaim
> all space when retiring the request (to prevent HEADs colliding
> with rapid ringbuffer wraparound) the amount of available space
> on each ringbuffer upon reset is less than when we start. Do one
> more pass over all the ringbuffers to reset the available space
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
>  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>  4 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 41263cd4170c..3a42c350fec9 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
>  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  					struct intel_engine_cs *ring)
>  {
> +	struct intel_ringbuffer *buffer;
> +
>  	while (!list_empty(&ring->active_list)) {
>  		struct drm_i915_gem_object *obj;
>  
> @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  
>  		i915_gem_request_retire(request);
>  	}
> +
> +	/* Having flushed all requests from all queues, we know that all
> +	 * ringbuffers must now be empty. However, since we do not reclaim
> +	 * all space when retiring the request (to prevent HEADs colliding
> +	 * with rapid ringbuffer wraparound) the amount of available space
> +	 * upon reset is less than when we start. Do one more pass over
> +	 * all the ringbuffers to reset last_retired_head.
> +	 */
> +	list_for_each_entry(buffer, &ring->buffers, link) {
> +		buffer->last_retired_head = buffer->tail;
> +		intel_ring_update_space(buffer);
> +	}
>  }
>  
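
For context, intel_ring_update_space() derives the free space from HEAD and
TAIL roughly as sketched below (a paraphrase of the code around the time of
this series, not a verbatim quote; I915_RING_FREE_SPACE is the reserved gap
that keeps TAIL from catching up with HEAD):

/* Paraphrased sketch of the ring-space accounting (not verbatim). */
static int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;	/* TAIL is at or past HEAD, space wraps around */
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	/* Adopt the HEAD position recorded when the last request retired */
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

Setting last_retired_head = buffer->tail in the hunk above therefore makes
head equal tail on the next update, so each ringbuffer reports its full size
(minus the reserved gap) as available again after the reset.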

Right after cleaning up the rings in i915_gem_reset(),
we have i915_gem_context_reset(). That will go through
all contexts and their ringbuffers and set tail and head to
zero.

If we do the space adjustment in intel_lr_context_reset(),
we can avoid adding a new ring->buffers list for this purpose;
the diff I have in mind is in the Patch section at the end of this page.



Thanks,
--Mika

>  void i915_gem_reset(struct drm_device *dev)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 28a712e7d2d0..de52ddc108a7 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1881,6 +1881,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	init_waitqueue_head(&ring->irq_queue);
>  
> +	INIT_LIST_HEAD(&ring->buffers);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
>  	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
>  	spin_lock_init(&ring->execlist_lock);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 20a75bb516ac..d2e0b3b7efbf 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2030,10 +2030,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  	int ret;
>  
>  	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> -	if (ring == NULL)
> +	if (ring == NULL) {
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
> +				 engine->name);
>  		return ERR_PTR(-ENOMEM);
> +	}
>  
>  	ring->ring = engine;
> +	list_add(&ring->link, &engine->buffers);
>  
>  	ring->size = size;
>  	/* Workaround an erratum on the i830 which causes a hang if
> @@ -2049,8 +2053,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  
>  	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
>  	if (ret) {
> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> -			  engine->name, ret);
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
> +				 engine->name, ret);
> +		list_del(&ring->link);
>  		kfree(ring);
>  		return ERR_PTR(ret);
>  	}
> @@ -2062,6 +2067,7 @@ void
>  intel_ringbuffer_free(struct intel_ringbuffer *ring)
>  {
>  	intel_destroy_ringbuffer_obj(ring);
> +	list_del(&ring->link);
>  	kfree(ring);
>  }
>  
> @@ -2077,6 +2083,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
>  	INIT_LIST_HEAD(&ring->active_list);
>  	INIT_LIST_HEAD(&ring->request_list);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
> +	INIT_LIST_HEAD(&ring->buffers);
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 49fa41dc0eb6..58b1976a7d0a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -100,6 +100,7 @@ struct intel_ringbuffer {
>  	void __iomem *virtual_start;
>  
>  	struct intel_engine_cs *ring;
> +	struct list_head link;
>  
>  	u32 head;
>  	u32 tail;
> @@ -157,6 +158,7 @@ struct  intel_engine_cs {
>  	u32		mmio_base;
>  	struct		drm_device *dev;
>  	struct intel_ringbuffer *buffer;
> +	struct list_head buffers;
>  
>  	/*
>  	 * A pool of objects to use as shadow copies of client batch buffers
> -- 
> 2.5.1

Comments

Chris Wilson Sept. 28, 2015, 3:43 p.m. UTC | #1
On Mon, Sep 28, 2015 at 06:25:04PM +0300, Mika Kuoppala wrote:
> 
> Hi,
> 
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Having flushed all requests from all queues, we know that all
> > ringbuffers must now be empty. However, since we do not reclaim
> > all space when retiring the request (to prevent HEADs colliding
> > with rapid ringbuffer wraparound) the amount of available space
> > on each ringbuffer upon reset is less than when we start. Do one
> > more pass over all the ringbuffers to reset the available space
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > [diffstat and diff snipped; quoted in full above]
> 
> Right after cleaning up the rings in i915_gem_reset(),
> we have i915_gem_context_reset(). That will go through
> all contexts and their ringbuffers and set tail and head to
> zero.
> 
> If we do the space adjustment in intel_lr_context_reset(),
> we can avoid adding a new ring->buffers list for this purpose;
> the diff I have in mind is in the Patch section at the end of this page.

No. The point is that we want to do it in a generic manner so that we can
remove intel_lr_context_reset() (the legacy code is just a degenerate
case of execlists; look at the patches I sent last year to see how simple
the code becomes after applying that transformation).
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 256167b..e110d6b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2532,15 +2532,16 @@  void intel_lr_context_reset(struct drm_device *dev,
                        WARN(1, "Failed get_pages for context obj\n");
                        continue;
                }
+
+               ringbuf->last_retired_head = ringbuf->tail;
+               intel_ring_update_space(ringbuf);
+
                page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
                reg_state = kmap_atomic(page);
 
-               reg_state[CTX_RING_HEAD+1] = 0;
-               reg_state[CTX_RING_TAIL+1] = 0;
+               reg_state[CTX_RING_HEAD+1] = ringbuf->head;
+               reg_state[CTX_RING_TAIL+1] = ringbuf->tail;
 
                kunmap_atomic(reg_state);
-
-               ringbuf->head = 0;
-               ringbuf->tail = 0;
        }
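
For reference, the reset path that Mika refers to above looks roughly like
the sketch below at the time of this discussion (paraphrased, not verbatim;
in execlists mode i915_gem_context_reset() walks every context and hands its
ringbuffers to intel_lr_context_reset(), which is where the diff above hooks in):

/* Paraphrased sketch of the GPU reset path under discussion (not verbatim). */
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	/* Inspect the requests first to mark the guilty contexts... */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, ring);

	/* ...then flush them; Chris' patch adds its extra pass here. */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_cleanup(dev_priv, ring);

	/* Right afterwards the per-context state is reset; with execlists
	 * this ends up in intel_lr_context_reset() for each context, which
	 * zeroes the ringbuffer HEAD/TAIL seen by the context image.
	 */
	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);
}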