[03/11] drm/i915: Prepare for non-object vma

Message ID 20180605071949.14159-4-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 5, 2018, 7:19 a.m. UTC
In order to allow ourselves to use a VMA to wrap entities other than
GEM objects, we need to allow the vma->obj backpointer to be NULL.
In most cases, we know we are operating on a GEM object and its vma, but
we need the core code (such as i915_vma_pin/insert/bind/unbind) to work
regardless of the innards.

The remaining eyesore here is vma->obj->cache_level and related (but
less of an issue) vma->obj->gt_ro. With a bit of care we should mirror
those on the vma itself.
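
A rough sketch of what mirroring those fields on the vma might look like
(illustrative only, not part of this patch; the member placement and the
defaults for the no-object case are assumptions):

  /* candidate members mirrored onto struct i915_vma */
  unsigned int cache_level;     /* copy of obj->cache_level, I915_CACHE_NONE without an obj */
  bool gt_ro;                   /* copy of obj->gt_ro, false without an obj */

  /* at vma creation, where obj may now be NULL */
  if (obj) {
          vma->cache_level = obj->cache_level;
          vma->gt_ro = obj->gt_ro;
  } else {
          vma->cache_level = I915_CACHE_NONE;   /* 0, the same fallback this patch uses */
          vma->gt_ro = false;
  }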

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  7 +-
 drivers/gpu/drm/i915/i915_gpu_error.c |  7 +-
 drivers/gpu/drm/i915/i915_vma.c       | 96 ++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_vma.h       |  2 +-
 4 files changed, 67 insertions(+), 45 deletions(-)

Comments

Joonas Lahtinen June 5, 2018, 9:21 a.m. UTC | #1
Quoting Chris Wilson (2018-06-05 10:19:41)
> In order to allow ourselves to use a VMA to wrap entities other than
> GEM objects, we need to allow the vma->obj backpointer to be NULL.
> In most cases, we know we are operating on a GEM object and its vma, but
> we need the core code (such as i915_vma_pin/insert/bind/unbind) to work
> regardless of the innards.
> 
> The remaining eyesore here is vma->obj->cache_level and related (but
> less of an issue) vma->obj->gt_ro. With a bit of care we should mirror
> those on the vma itself.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Matthew Auld <matthew.william.auld@gmail.com>

> @@ -1569,11 +1572,11 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
>         int count_inactive, count_active;
>  
>         count_inactive = 0;
> -       list_for_each_entry(vma, &vm->active_list, vm_link)
> +       list_for_each_entry(vma, &vm->inactive_list, vm_link)
>                 count_inactive++;
>  
>         count_active = 0;
> -       list_for_each_entry(vma, &vm->inactive_list, vm_link)
> +       list_for_each_entry(vma, &vm->active_list, vm_link)
>                 count_active++;

Pretty sure this should go as a separate bugfix...

> @@ -586,23 +592,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
>                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
>         }
>         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> -       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
> +       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
>  
>         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>  
> -       spin_lock(&dev_priv->mm.obj_lock);
> -       list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> -       obj->bind_count++;
> -       spin_unlock(&dev_priv->mm.obj_lock);
> +       if (vma->obj) {
> +               struct drm_i915_gem_object *obj = vma->obj;
> +
> +               spin_lock(&dev_priv->mm.obj_lock);
> +               list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
> +               obj->bind_count++;
> +               spin_unlock(&dev_priv->mm.obj_lock);
>  
> -       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
> +               GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

checkpatch will yell about the long line. The block could even warrant a __ func.
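
One possible shape for such a helper, just factoring out the block above
(the name __i915_vma_set_bound() is invented here, not taken from the series):

  static void __i915_vma_set_bound(struct i915_vma *vma)
  {
          struct drm_i915_private *i915 = vma->vm->i915;
          struct drm_i915_gem_object *obj = vma->obj;

          if (!obj)
                  return;

          spin_lock(&i915->mm.obj_lock);
          list_move_tail(&obj->mm.link, &i915->mm.bound_list);
          obj->bind_count++;
          spin_unlock(&i915->mm.obj_lock);

          /* the pages must stay pinned for every bound vma of this object */
          GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
  }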

> +       }
>  
>         return 0;
>  
>  err_clear:
>         vma->vm->clear_pages(vma);
>  err_unpin:
> -       i915_gem_object_unpin_pages(obj);
> +       if (vma->obj)
> +               i915_gem_object_unpin_pages(vma->obj);
>         return ret;
>  }
>  
> @@ -610,7 +621,6 @@ static void
>  i915_vma_remove(struct i915_vma *vma)
>  {
>         struct drm_i915_private *i915 = vma->vm->i915;
> -       struct drm_i915_gem_object *obj = vma->obj;
>  
>         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
> @@ -620,20 +630,26 @@ i915_vma_remove(struct i915_vma *vma)
>         drm_mm_remove_node(&vma->node);
>         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
>  
> -       /* Since the unbound list is global, only move to that list if
> +       /*
> +        * Since the unbound list is global, only move to that list if
>          * no more VMAs exist.
>          */
> -       spin_lock(&i915->mm.obj_lock);
> -       if (--obj->bind_count == 0)
> -               list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
> -       spin_unlock(&i915->mm.obj_lock);
> -
> -       /* And finally now the object is completely decoupled from this vma,
> -        * we can drop its hold on the backing storage and allow it to be
> -        * reaped by the shrinker.
> -        */
> -       i915_gem_object_unpin_pages(obj);
> -       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
> +       if (vma->obj) {
> +               struct drm_i915_gem_object *obj = vma->obj;
> +
> +               spin_lock(&i915->mm.obj_lock);
> +               if (--obj->bind_count == 0)
> +                       list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
> +               spin_unlock(&i915->mm.obj_lock);
> +
> +               /*
> +                * And finally now the object is completely decoupled from this
> +                * vma, we can drop its hold on the backing storage and allow
> +                * it to be reaped by the shrinker.
> +                */
> +               i915_gem_object_unpin_pages(obj);
> +               GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

Right, two helpers it is.
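
The unbind side could be factored the same way (again, the name is only
illustrative, mirroring the code in i915_vma_remove() above):

  static void __i915_vma_clear_bound(struct i915_vma *vma)
  {
          struct drm_i915_private *i915 = vma->vm->i915;
          struct drm_i915_gem_object *obj = vma->obj;

          if (!obj)
                  return;

          /* The unbound list is global, so only move there once no VMAs remain. */
          spin_lock(&i915->mm.obj_lock);
          if (--obj->bind_count == 0)
                  list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
          spin_unlock(&i915->mm.obj_lock);

          /*
           * Now the object is decoupled from this vma, drop its hold on the
           * backing storage and allow it to be reaped by the shrinker.
           */
          i915_gem_object_unpin_pages(obj);
          GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
  }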

> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -407,7 +407,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
>  static inline void
>  i915_vma_unpin_fence(struct i915_vma *vma)
>  {
> -       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
> +       /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */

Umm?

Regards, Joonas
Chris Wilson June 5, 2018, 9:25 a.m. UTC | #2
Quoting Joonas Lahtinen (2018-06-05 10:21:29)
> Quoting Chris Wilson (2018-06-05 10:19:41)
> > +++ b/drivers/gpu/drm/i915/i915_vma.h
> > @@ -407,7 +407,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
> >  static inline void
> >  i915_vma_unpin_fence(struct i915_vma *vma)
> >  {
> > -       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
> > +       /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
> 
> Umm?

Header inclusion pain. drm_i915_private isn't defined at this point.
So I left a placeholder just in case.
-Chris
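
To illustrate the problem being described (a sketch, not from the patch):
where i915_vma.h is parsed, struct drm_i915_private is only a forward
declaration, so a static inline there cannot dereference it:

  struct drm_i915_private;        /* only a forward declaration at this point */

  static inline void
  i915_vma_unpin_fence(struct i915_vma *vma)
  {
          /*
           * Would not compile: reaching vma->vm->i915->drm needs the full
           * definition of struct drm_i915_private, hence the commented-out
           * placeholder in the patch.
           */
          /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
          if (vma->fence)
                  __i915_vma_unpin_fence(vma);
  }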

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4fb5c79ac24b..3f84122ec145 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3585,8 +3585,11 @@  void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		if (!i915_vma_unbind(vma))
 			continue;
 
-		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-		WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+		WARN_ON(i915_vma_bind(vma,
+				      obj ? obj->cache_level : 0,
+				      PIN_UPDATE));
+		if (obj)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
 	ggtt->vm.closed = false;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index cd09a1688192..df524c9cad40 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1050,6 +1050,9 @@  static u32 capture_error_bo(struct drm_i915_error_buffer *err,
 	int i = 0;
 
 	list_for_each_entry(vma, head, vm_link) {
+		if (!vma->obj)
+			continue;
+
 		if (pinned_only && !i915_vma_is_pinned(vma))
 			continue;
 
@@ -1569,11 +1572,11 @@  static void capture_pinned_buffers(struct i915_gpu_state *error)
 	int count_inactive, count_active;
 
 	count_inactive = 0;
-	list_for_each_entry(vma, &vm->active_list, vm_link)
+	list_for_each_entry(vma, &vm->inactive_list, vm_link)
 		count_inactive++;
 
 	count_active = 0;
-	list_for_each_entry(vma, &vm->inactive_list, vm_link)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		count_active++;
 
 	bo = NULL;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e8f07cdca063..5c032b12ba8d 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -345,7 +345,7 @@  void i915_vma_flush_writes(struct i915_vma *vma)
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
 	GEM_BUG_ON(vma->iomap == NULL);
 
@@ -365,6 +365,7 @@  void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 		return;
 
 	obj = vma->obj;
+	GEM_BUG_ON(!obj);
 
 	i915_vma_unpin(vma);
 	i915_vma_close(vma);
@@ -477,7 +478,7 @@  static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
+	unsigned int cache_level;
 	u64 start, end;
 	int ret;
 
@@ -512,16 +513,21 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  size, obj->base.size,
-			  flags & PIN_MAPPABLE ? "mappable" : "total",
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
 		return -ENOSPC;
 	}
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	if (vma->obj) {
+		ret = i915_gem_object_pin_pages(vma->obj);
+		if (ret)
+			return ret;
+
+		cache_level = vma->obj->cache_level;
+	} else {
+		cache_level = 0;
+	}
 
 	GEM_BUG_ON(vma->pages);
 
@@ -538,7 +544,7 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-					   size, offset, obj->cache_level,
+					   size, offset, cache_level,
 					   flags);
 		if (ret)
 			goto err_clear;
@@ -577,7 +583,7 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}
 
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-					  size, alignment, obj->cache_level,
+					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
 			goto err_clear;
@@ -586,23 +592,28 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-	obj->bind_count++;
-	spin_unlock(&dev_priv->mm.obj_lock);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		spin_lock(&dev_priv->mm.obj_lock);
+		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+		obj->bind_count++;
+		spin_unlock(&dev_priv->mm.obj_lock);
 
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	}
 
 	return 0;
 
 err_clear:
 	vma->vm->clear_pages(vma);
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
 	return ret;
 }
 
@@ -610,7 +621,6 @@  static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -620,20 +630,26 @@  i915_vma_remove(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	/* Since the unbound list is global, only move to that list if
+	/*
+	 * Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
-	spin_lock(&i915->mm.obj_lock);
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-	spin_unlock(&i915->mm.obj_lock);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		spin_lock(&i915->mm.obj_lock);
+		if (--obj->bind_count == 0)
+			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+		spin_unlock(&i915->mm.obj_lock);
+
+		/*
+		 * And finally now the object is completely decoupled from this
+		 * vma, we can drop its hold on the backing storage and allow
+		 * it to be reaped by the shrinker.
+		 */
+		i915_gem_object_unpin_pages(obj);
+		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	}
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +674,7 @@  int __i915_vma_do_pin(struct i915_vma *vma,
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
 	if (ret)
 		goto err_remove;
 
@@ -715,6 +731,7 @@  void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	int i;
 
 	GEM_BUG_ON(vma->node.allocated);
@@ -726,12 +743,13 @@  static void __i915_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 	list_del(&vma->vm_link);
-	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+	if (vma->obj)
+		rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(i915->vmas, vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -797,13 +815,13 @@  void i915_vma_revoke_mmap(struct i915_vma *vma)
 
 int i915_vma_unbind(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
 	unsigned long active;
 	int ret;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	/* First wait upon any activity as retiring the request may
+	/*
+	 * First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
 	might_sleep();
@@ -811,7 +829,8 @@  int i915_vma_unbind(struct i915_vma *vma)
 	if (active) {
 		int idx;
 
-		/* When a closed VMA is retired, it is unbound - eek.
+		/*
+		 * When a closed VMA is retired, it is unbound - eek.
 		 * In order to prevent it from being recursively closed,
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
@@ -849,9 +868,6 @@  int i915_vma_unbind(struct i915_vma *vma)
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
-	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/*
 		 * Check that we have flushed all writes through the GGTT
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index fc4294cfaa91..f0532f1a4953 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -407,7 +407,7 @@  static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
 	if (vma->fence)
 		__i915_vma_unpin_fence(vma);
 }