drm/i915: Separate pinning of pages from i915_vma_insert()

Message ID 20190802204111.7344-1-prathap.kumar.valsan@intel.com (mailing list archive)
State New, archived
Series: drm/i915: Separate pinning of pages from i915_vma_insert()

Commit Message

Kumar Valsan, Prathap Aug. 2, 2019, 8:41 p.m. UTC
Currently i915_vma_insert() is responsible both for allocating the
drm_mm node and for allocating or gathering the backing physical pages.
Move the latter into a separate function for better readability.
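
With this patch the pin path reserves the node first and acquires the
backing pages second. Roughly, the resulting __i915_vma_do_pin() flow
looks like this (simplified sketch, error unwinding elided):

	ret = i915_vma_insert(vma, size, alignment, flags);
	if (ret)
		goto err_unpin;

	/* Pages are now pinned and gathered after the node is reserved */
	ret = i915_vma_set_pages(vma);
	if (ret)
		goto err_remove;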

Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 76 +++++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_vma.h |  1 +
 2 files changed, 45 insertions(+), 32 deletions(-)

Comments

Chris Wilson Aug. 2, 2019, 8:30 p.m. UTC | #1
Quoting Prathap Kumar Valsan (2019-08-02 21:41:11)
> Currently i915_vma_insert() is responsible both for allocating the
> drm_mm node and for allocating or gathering the backing physical pages.
> Move the latter into a separate function for better readability.

Close but if you look at the mutex patches, you'll see why it has to be
before.
-Chris
Kumar Valsan, Prathap Aug. 5, 2019, 8:44 p.m. UTC | #2
On Fri, Aug 02, 2019 at 09:30:43PM +0100, Chris Wilson wrote:
> Quoting Prathap Kumar Valsan (2019-08-02 21:41:11)
> > Currently i915_vma_insert() is responsible both for allocating the
> > drm_mm node and for allocating or gathering the backing physical pages.
> > Move the latter into a separate function for better readability.
> 
> Close but if you look at the mutex patches, you'll see why it has to be
> before.
> -Chris

Looked at the mutex patches. With async get_pages and async bind,
pinning and set_pages() are already separated out from i915_vma_insert(),
so this patch may not be needed.

Thanks,
Prathap
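
For context, the ordering Chris refers to: in the later vm->mutex series,
i915_vma_insert() runs with vma->vm->mutex held, while pinning the
object's pages can allocate memory and recurse into the shrinker, which
must not happen under that lock. So the pages have to be acquired before
the node is reserved, not after. A rough sketch of that ordering, reusing
this patch's i915_vma_set_pages() name (not the exact upstream code):

	/*
	 * Acquire the backing pages first; this may allocate and wake
	 * the shrinker, which must not happen under vm->mutex.
	 */
	ret = i915_vma_set_pages(vma);
	if (ret)
		return ret;

	mutex_lock(&vma->vm->mutex);
	ret = i915_vma_insert(vma, size, alignment, flags);
	mutex_unlock(&vma->vm->mutex);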

Patch

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 7734d6218ce7..8da88a474594 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -584,35 +584,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -ENOSPC;
 	}
 
-	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
-		if (ret)
-			return ret;
-
+	if (vma->obj)
 		cache_level = vma->obj->cache_level;
-	} else {
+	else
 		cache_level = 0;
-	}
-
-	GEM_BUG_ON(vma->pages);
-
-	ret = vma->ops->set_pages(vma);
-	if (ret)
-		goto err_unpin;
 
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
-		    range_overflows(offset, size, end)) {
-			ret = -EINVAL;
-			goto err_clear;
-		}
+		    range_overflows(offset, size, end))
+			return -EINVAL;
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, cache_level,
 					   flags);
 		if (ret)
-			goto err_clear;
+			return ret;
 	} else {
 		/*
 		 * We only support huge gtt pages through the 48b PPGTT,
@@ -651,7 +638,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_clear;
+			return ret;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -663,28 +650,24 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
 	mutex_unlock(&vma->vm->mutex);
 
-	if (vma->obj) {
+	if (vma->obj)
 		atomic_inc(&vma->obj->bind_count);
-		assert_bind_count(vma->obj);
-	}
 
 	return 0;
-
-err_clear:
-	vma->ops->clear_pages(vma);
-err_unpin:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
-	return ret;
 }
 
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
+	bool vma_has_pages = false;
+
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-	vma->ops->clear_pages(vma);
+	if (vma->pages) {
+		vma_has_pages = true;
+		vma->ops->clear_pages(vma);
+	}
 
 	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
@@ -705,11 +688,37 @@ i915_vma_remove(struct i915_vma *vma)
 		 * vma, we can drop its hold on the backing storage and allow
 		 * it to be reaped by the shrinker.
 		 */
-		i915_gem_object_unpin_pages(obj);
+		if (vma_has_pages)
+			i915_gem_object_unpin_pages(obj);
 		assert_bind_count(obj);
 	}
 }
 
+int i915_vma_set_pages(struct i915_vma *vma)
+{
+	int ret;
+
+	if (vma->obj) {
+		ret = i915_gem_object_pin_pages(vma->obj);
+		if (ret)
+			return ret;
+	}
+
+	GEM_BUG_ON(vma->pages);
+
+	ret = vma->ops->set_pages(vma);
+	if (ret) {
+		if (vma->obj)
+			i915_gem_object_unpin_pages(vma->obj);
+		return ret;
+	}
+
+	if (vma->obj)
+		assert_bind_count(vma->obj);
+
+	return ret;
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
@@ -729,6 +738,10 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err_unpin;
+		ret = i915_vma_set_pages(vma);
+		if (ret)
+			goto err_remove;
+
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
@@ -743,7 +756,6 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
-
 err_remove:
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		i915_vma_remove(vma);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..ad374166f3b8 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -288,6 +288,7 @@ i915_vma_compare(struct i915_vma *vma,
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
+int i915_vma_set_pages(struct i915_vma *vma);
 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);