[CI] drm/i915: Refactor unsettting obj->mm.pages

Message ID 20180611075532.26534-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 11, 2018, 7:55 a.m. UTC
As i915_gem_object_attach_phys() wants to play dirty and mess around
with obj->mm.pages itself (replacing the shmemfs backing store with a
DMA allocation), refactor the gubbins into __i915_gem_object_unset_pages()
so that we don't have to duplicate all the secrets.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 68 ++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 31 deletions(-)
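
In short, both call sites need the same "detach the pages from the
object" dance, which the patch pulls into a single helper. A simplified
sketch of the helper's contract, condensed from the diff below (locking
and the list/page-iter bookkeeping elided):

	static struct sg_table *
	__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *pages = fetch_and_zero(&obj->mm.pages);

		if (!pages)
			return NULL; /* the object never had any pages */

		/* ... unlink from i915->mm lists, reset the page iter ... */
		obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

		/* may be an ERR_PTR left behind by a failed get_pages */
		return pages;
	}

The callers then decide what to do with the detached sg_table:
__i915_gem_object_put_pages() hands it to obj->ops->put_pages() when
!IS_ERR(), while i915_gem_object_attach_phys() keeps it so the err_xfer
path can reinstate it via __i915_gem_object_set_pages().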

Comments

Jani Nikula June 11, 2018, 12:35 p.m. UTC | #1
On Mon, 11 Jun 2018, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> As i915_gem_object_attach_phys() wants to play dirty and mess around
> with obj->mm.pages itself (replacing the shmemfs backing store with a
> DMA allocation), refactor the gubbins into __i915_gem_object_unset_pages()
> so that we don't have to duplicate all the secrets.

Triple t in "unsettting" in the subject. Can be fixed while applying.

BR,
Jani.


Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4187e0688e50..026a27afa65c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2401,29 +2401,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
-	if (i915_gem_object_has_pinned_pages(obj))
-		return;
-
-	GEM_BUG_ON(obj->bind_count);
-	if (!i915_gem_object_has_pages(obj))
-		return;
-
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-		goto unlock;
-
-	/* ->put_pages might need to allocate memory for the bit17 swizzle
-	 * array, hence protect them from being reaped by removing them from gtt
-	 * lists early. */
 	pages = fetch_and_zero(&obj->mm.pages);
-	GEM_BUG_ON(!pages);
+	if (!pages)
+		return NULL;
 
 	spin_lock(&i915->mm.obj_lock);
 	list_del(&obj->mm.link);
@@ -2442,12 +2428,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	}
 
 	__i915_gem_object_reset_page_iter(obj);
+	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+	return pages;
+}
 
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
+{
+	struct sg_table *pages;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return;
+
+	GEM_BUG_ON(obj->bind_count);
+	if (!i915_gem_object_has_pages(obj))
+		return;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock_nested(&obj->mm.lock, subclass);
+	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+		goto unlock;
+
+	/*
+	 * ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early.
+	 */
+	pages = __i915_gem_object_unset_pages(obj);
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
-	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -6090,16 +6101,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = fetch_and_zero(&obj->mm.pages);
-	if (pages) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-		__i915_gem_object_reset_page_iter(obj);
-
-		spin_lock(&i915->mm.obj_lock);
-		list_del(&obj->mm.link);
-		spin_unlock(&i915->mm.obj_lock);
-	}
+	pages = __i915_gem_object_unset_pages(obj);
 
 	obj->ops = &i915_gem_phys_ops;
 
@@ -6117,7 +6119,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 
 err_xfer:
 	obj->ops = &i915_gem_object_ops;
-	obj->mm.pages = pages;
+	if (!IS_ERR_OR_NULL(pages)) {
+		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	}
 err_unlock:
 	mutex_unlock(&obj->mm.lock);
 	return err;