[04/12] drm/i915: Track active by VMA instead of object

Message ID 1374458899-8635-5-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived
Headers show

Commit Message

Ben Widawsky July 22, 2013, 2:08 a.m. UTC
Even though we want to be able to track active by VMA, the rest of the
code still uses objects for most internal APIs. To solve this, create
an object_is_active() function to help us convert over to VMA usage.

Because we intend to keep some functions that care about objects rather
than VMAs, having this function around will be useful even as we begin
to use VMAs more in function arguments.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            | 15 +++----
 drivers/gpu/drm/i915/i915_gem.c            | 64 ++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 3 files changed, 48 insertions(+), 33 deletions(-)
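
For readers of the thread, a minimal standalone sketch of the model the
patch moves to (hypothetical simplified types, not the driver's actual
structs): the active bit lives on each VMA, and the object-level query
is an OR across the object's VMAs.

#include <stdbool.h>

/* Simplified stand-ins for i915_vma and drm_i915_gem_object. */
struct vma {
	bool active;          /* set while this binding has pending GPU work */
	struct vma *next;     /* next VMA of the same object */
};

struct object {
	struct vma *vma_list; /* one VMA per address space the object is bound in */
};

/* NB: not the same as !is_inactive; an unbound object is neither. */
static bool object_is_active(const struct object *obj)
{
	const struct vma *v;

	for (v = obj->vma_list; v; v = v->next)
		if (v->active)
			return true;
	return false;
}

With this aggregate in place, call sites that read obj->active (the
busy ioctl, the shrinker, execbuffer relocation) can be converted
without yet taking a VMA argument.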

Comments

Daniel Vetter July 23, 2013, 4:48 p.m. UTC | #1
On Sun, Jul 21, 2013 at 07:08:11PM -0700, Ben Widawsky wrote:
> Even though we want to be able to track active by VMA, the rest of the
> code still uses objects for most internal APIs. To solve this, create
> an object_is_active() function to help us convert over to VMA usage.
> 
> Because we intend to keep some functions that care about objects rather
> than VMAs, having this function around will be useful even as we begin
> to use VMAs more in function arguments.
> 
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>

Still not really convinced. For access synchronization we don't care
through which vm a bo is still accessed, only how (read/write) and when
the last access happened (ring + seqno).

Note that this means the per-vm lru doesn't really need an
active/inactive split anymore; for evict_something we only care about
the ordering, not whether a bo is active. unbind() will care, but I'm
not sure the "same bo in multiple address spaces needs to be evicted"
use-case is something we should even care about.

So imo this commit needs a good justification for _why_ we want to
track active per-vma. Atm I don't see a use-case, but I see complexity.
-Daniel
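
A rough sketch of the alternative argued for above (hypothetical
simplified fields, not the driver's actual state): for synchronization
the object only needs "which ring touched me last, and up to which
seqno", kept once per object regardless of which vm the access went
through.

#include <stdbool.h>

struct object_sync_state {
	unsigned int last_ring;        /* ring of the most recent GPU access */
	unsigned int last_read_seqno;  /* seqno covering the last read */
	unsigned int last_write_seqno; /* seqno covering the last write */
};

/* Idle once the ring has retired past the last read; seqnos wrap, so
 * compare with signed arithmetic as i915_seqno_passed() does. */
static bool object_idle(const struct object_sync_state *o,
			unsigned int completed_seqno)
{
	return (int)(completed_seqno - o->last_read_seqno) >= 0;
}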

> [snip patch; see the Patch section below]
Ben Widawsky July 26, 2013, 9:48 p.m. UTC | #2
On Tue, Jul 23, 2013 at 06:48:09PM +0200, Daniel Vetter wrote:
> On Sun, Jul 21, 2013 at 07:08:11PM -0700, Ben Widawsky wrote:
> > Even though we want to be able to track active by VMA, the rest of the
> > code still uses objects for most internal APIs. To solve this, create
> > an object_is_active() function to help us convert over to VMA usage.
> > 
> > Because we intend to keep some functions that care about objects rather
> > than VMAs, having this function around will be useful even as we begin
> > to use VMAs more in function arguments.
> > 
> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> 
> Still not really convinced. For access synchronization we don't care
> through which vm a bo is still accessed, only how (read/write) and when
> the last access happened (ring + seqno).
> 
> Note that this means the per-vm lru doesn't really need an
> active/inactive split anymore; for evict_something we only care about
> the ordering, not whether a bo is active. unbind() will care, but I'm
> not sure the "same bo in multiple address spaces needs to be evicted"
> use-case is something we should even care about.
> 
> So imo this commit needs a good justification for _why_ we want to
> track active per-vma. Atm I don't see a use-case, but I see complexity.
> -Daniel

I'm fine with deferring this until needed.

> > [snip patch; see the Patch section below]

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f809204..bdce9c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -541,6 +541,13 @@  struct i915_vma {
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
 
+	/**
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
+	 */
+	unsigned int active:1;
+
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
 
@@ -1266,13 +1273,6 @@  struct drm_i915_gem_object {
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active lists (has pending
-	 * rendering and so a non-zero seqno), and is not set if it i s on
-	 * inactive (ready to be unbound) list.
-	 */
-	unsigned int active:1;
-
-	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
@@ -1726,6 +1726,7 @@  static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
+bool i915_gem_object_is_active(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct i915_address_space *vm,
 				    struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6bdf89d..9ea6424 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -119,10 +119,22 @@  int i915_mutex_lock_interruptible(struct drm_device *dev)
 	return 0;
 }
 
+/* NB: Not the same as !i915_gem_object_is_inactive */
+bool i915_gem_object_is_active(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->active)
+			return true;
+
+	return false;
+}
+
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_bound_any(obj) && !obj->active;
+	return i915_gem_obj_bound_any(obj) && !i915_gem_object_is_active(obj);
 }
 
 int
@@ -1883,14 +1895,14 @@  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 	obj->ring = ring;
 
+	/* Move from whatever list we were on to the tail of execution. */
+	vma = i915_gem_obj_to_vma(obj, vm);
 	/* Add a reference if we're newly entering the active list. */
-	if (!obj->active) {
+	if (!vma->active) {
 		drm_gem_object_reference(&obj->base);
-		obj->active = 1;
+		vma->active = 1;
 	}
 
-	/* Move from whatever list we were on to the tail of execution. */
-	vma = i915_gem_obj_to_vma(obj, vm);
 	list_move_tail(&vma->mm_list, &vm->active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
@@ -1911,16 +1923,23 @@  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
-				 struct i915_address_space *vm)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_address_space *vm;
 	struct i915_vma *vma;
+	int i = 0;
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
-	BUG_ON(!obj->active);
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	list_move_tail(&vma->mm_list, &vm->inactive_list);
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+		vma = i915_gem_obj_to_vma(obj, vm);
+		if (!vma || !vma->active)
+			continue;
+		list_move_tail(&vma->mm_list, &vm->inactive_list);
+		vma->active = 0;
+		i++;
+	}
 
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
@@ -1932,8 +1951,8 @@  i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
 	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
-	obj->active = 0;
-	drm_gem_object_unreference(&obj->base);
+	while (i--)
+		drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
 }
@@ -2254,15 +2273,13 @@  static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 	}
 
 	while (!list_empty(&ring->active_list)) {
-		struct i915_address_space *vm;
 		struct drm_i915_gem_object *obj;
 
 		obj = list_first_entry(&ring->active_list,
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_object_move_to_inactive(obj, vm);
+		i915_gem_object_move_to_inactive(obj);
 	}
 }
 
@@ -2348,8 +2365,6 @@  i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_private *dev_priv = ring->dev->dev_private;
-		struct i915_address_space *vm;
 		struct drm_i915_gem_object *obj;
 
 		obj = list_first_entry(&ring->active_list,
@@ -2359,8 +2374,8 @@  i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-			i915_gem_object_move_to_inactive(obj, vm);
+		BUG_ON(!i915_gem_object_is_active(obj));
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
@@ -2435,7 +2450,7 @@  i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	if (obj->active) {
+	if (i915_gem_object_is_active(obj)) {
 		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
 		if (ret)
 			return ret;
@@ -2500,7 +2515,7 @@  i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (ret)
 		goto out;
 
-	if (obj->active) {
+	if (i915_gem_object_is_active(obj)) {
 		seqno = obj->last_read_seqno;
 		ring = obj->ring;
 	}
@@ -3850,7 +3865,7 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 */
 	ret = i915_gem_object_flush_active(obj);
 
-	args->busy = obj->active;
+	args->busy = i915_gem_object_is_active(obj);
 	if (obj->ring) {
 		BUILD_BUG_ON(I915_NUM_RINGS > 16);
 		args->busy |= intel_ring_flag(obj->ring) << 16;
@@ -4716,13 +4731,12 @@  i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->active)
+		if (i915_gem_object_is_active(obj))
 			continue;
 
 		i915_gem_object_flush_gtt_write_domain(obj);
 		i915_gem_object_flush_cpu_write_domain(obj);
-		/* FIXME: Can't assume global gtt */
-		i915_gem_object_move_to_inactive(obj, &dev_priv->gtt.base);
+		i915_gem_object_move_to_inactive(obj);
 
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 819d8d8..8d2643b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -251,7 +251,7 @@  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	}
 
 	/* We can't wait for rendering with pagefaults disabled */
-	if (obj->active && in_atomic())
+	if (i915_gem_object_is_active(obj) && in_atomic())
 		return -EFAULT;
 
 	reloc->delta += target_offset;
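
One subtlety in the patch, restated as a standalone sketch (hypothetical
simplified types, not the driver's): i915_gem_object_move_to_active()
takes one object reference per VMA that newly becomes active, so
i915_gem_object_move_to_inactive() must drop exactly one reference per
VMA it deactivates; that is what the counter and the final
"while (i--)" unreference loop pair up.

struct vma {
	int active;              /* 1 while this binding has pending GPU work */
	struct vma *next;
};

struct object {
	struct vma *vma_list;
	int refcount;
};

static void move_to_active(struct object *obj, struct vma *v)
{
	if (!v->active) {        /* take one reference per newly-active VMA */
		v->active = 1;
		obj->refcount++;
	}
}

static void move_to_inactive(struct object *obj)
{
	struct vma *v;
	int dropped = 0;

	for (v = obj->vma_list; v; v = v->next) {
		if (!v->active)
			continue;
		v->active = 0;   /* deactivate and count this VMA */
		dropped++;
	}
	while (dropped--)
		obj->refcount--; /* pairs with the references taken above */
}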