
[030/262] drm/i915: Refactor unsetting obj->mm.pages

Message ID 20180517060738.19193-30-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson May 17, 2018, 6:03 a.m. UTC
As i915_gem_object_attach_phys() wants to play dirty and mess around
with obj->mm.pages itself (replacing the shmemfs pages with a DMA
allocation), refactor the gubbins into __i915_gem_object_unset_pages()
so that we don't have to duplicate all the secrets.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 68 ++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 31 deletions(-)
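
In short: every step involved in detaching obj->mm.pages (taking ownership of the sg_table, unlinking the object from the i915->mm lists, resetting the cached page iterator and page-size hints) now lives in one helper that both __i915_gem_object_put_pages() and i915_gem_object_attach_phys() share. A condensed, lightly annotated sketch of that helper follows; a few context steps are elided, so the quoted hunks below remain authoritative:

static struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	/* Take sole ownership of the pages; NULL means nothing was attached. */
	pages = fetch_and_zero(&obj->mm.pages);
	if (!pages)
		return NULL;

	/* The object no longer has pages, so hide it from the shrinker. */
	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	/* Drop cached page lookups and page-size bookkeeping. */
	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

Each caller then decides the fate of the returned sg_table: __i915_gem_object_put_pages() hands it to obj->ops->put_pages(), while i915_gem_object_attach_phys() keeps it around so the shmemfs pages can be restored if switching to the phys backing store fails.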

Comments

Matthew Auld May 18, 2018, 1:35 p.m. UTC | #1
On 17 May 2018 at 07:03, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> As i915_gem_object_attach_phys() wants to play dirty and mess around
> with obj->mm.pages itself (replacing the shmemfs pages with a DMA
> allocation), refactor the gubbins into __i915_gem_object_unset_pages()
> so that we don't have to duplicate all the secrets.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 68 ++++++++++++++++++---------------
>  1 file changed, 37 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index be3092a03722..4e480874563f 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2410,29 +2410,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
>         rcu_read_unlock();
>  }
>
> -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
> -                                enum i915_mm_subclass subclass)
> +static struct sg_table *
> +__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
>  {
>         struct drm_i915_private *i915 = to_i915(obj->base.dev);
>         struct sg_table *pages;
>
> -       if (i915_gem_object_has_pinned_pages(obj))
> -               return;
> -
> -       GEM_BUG_ON(obj->bind_count);
> -       if (!i915_gem_object_has_pages(obj))
> -               return;
> -
> -       /* May be called by shrinker from within get_pages() (on another bo) */
> -       mutex_lock_nested(&obj->mm.lock, subclass);
> -       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
> -               goto unlock;
> -
> -       /* ->put_pages might need to allocate memory for the bit17 swizzle
> -        * array, hence protect them from being reaped by removing them from gtt
> -        * lists early. */
>         pages = fetch_and_zero(&obj->mm.pages);
> -       GEM_BUG_ON(!pages);
> +       if (!pages)
> +               return NULL;
>
>         spin_lock(&i915->mm.obj_lock);
>         list_del(&obj->mm.link);
> @@ -2451,12 +2437,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
>         }
>
>         __i915_gem_object_reset_page_iter(obj);
> +       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
> +
> +       return pages;
> +}
> +
> +void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
> +                                enum i915_mm_subclass subclass)
> +{
> +       struct sg_table *pages;
> +
> +       if (i915_gem_object_has_pinned_pages(obj))
> +               return;
>
> +       GEM_BUG_ON(obj->bind_count);
> +       if (!i915_gem_object_has_pages(obj))
> +               return;
> +
> +       /* May be called by shrinker from within get_pages() (on another bo) */
> +       mutex_lock_nested(&obj->mm.lock, subclass);
> +       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
> +               goto unlock;
> +
> +       /*
> +        * ->put_pages might need to allocate memory for the bit17 swizzle
> +        * array, hence protect them from being reaped by removing them from gtt
> +        * lists early.
> +        */
> +       pages = __i915_gem_object_unset_pages(obj);
>         if (!IS_ERR(pages))
>                 obj->ops->put_pages(obj, pages);
>
> -       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
> -
>  unlock:
>         mutex_unlock(&obj->mm.lock);
>  }
> @@ -6013,16 +6024,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
>                 goto err_unlock;
>         }
>
> -       pages = fetch_and_zero(&obj->mm.pages);
> -       if (pages) {
> -               struct drm_i915_private *i915 = to_i915(obj->base.dev);
> -
> -               __i915_gem_object_reset_page_iter(obj);
> -
> -               spin_lock(&i915->mm.obj_lock);
> -               list_del(&obj->mm.link);
> -               spin_unlock(&i915->mm.obj_lock);
> -       }
> +       pages = __i915_gem_object_unset_pages(obj);
>
>         obj->ops = &i915_gem_phys_ops;
>
> @@ -6040,7 +6042,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
>
>  err_xfer:
>         obj->ops = &i915_gem_object_ops;
> -       obj->mm.pages = pages;
> +       if (!IS_ERR(pages)) {

That should be !IS_ERR_OR_NULL(pages), since __i915_gem_object_unset_pages() can now return NULL when the object has no pages attached.
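
Applied to the err_xfer hunk, the corrected restore path would presumably read (a sketch of the suggested fix, not the patch as posted):

err_xfer:
	obj->ops = &i915_gem_object_ops;
	/*
	 * pages may be NULL (nothing was attached) or an error sentinel
	 * rather than a real sg_table, so only restore a genuine one.
	 */
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}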

Reviewed-by: Matthew Auld <matthew.auld@intel.com>

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index be3092a03722..4e480874563f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2410,29 +2410,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
-	if (i915_gem_object_has_pinned_pages(obj))
-		return;
-
-	GEM_BUG_ON(obj->bind_count);
-	if (!i915_gem_object_has_pages(obj))
-		return;
-
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-		goto unlock;
-
-	/* ->put_pages might need to allocate memory for the bit17 swizzle
-	 * array, hence protect them from being reaped by removing them from gtt
-	 * lists early. */
 	pages = fetch_and_zero(&obj->mm.pages);
-	GEM_BUG_ON(!pages);
+	if (!pages)
+		return NULL;
 
 	spin_lock(&i915->mm.obj_lock);
 	list_del(&obj->mm.link);
@@ -2451,12 +2437,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	}
 
 	__i915_gem_object_reset_page_iter(obj);
+	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+	return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
+{
+	struct sg_table *pages;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return;
 
+	GEM_BUG_ON(obj->bind_count);
+	if (!i915_gem_object_has_pages(obj))
+		return;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock_nested(&obj->mm.lock, subclass);
+	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+		goto unlock;
+
+	/*
+	 * ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early.
+	 */
+	pages = __i915_gem_object_unset_pages(obj);
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
-	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -6013,16 +6024,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = fetch_and_zero(&obj->mm.pages);
-	if (pages) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-		__i915_gem_object_reset_page_iter(obj);
-
-		spin_lock(&i915->mm.obj_lock);
-		list_del(&obj->mm.link);
-		spin_unlock(&i915->mm.obj_lock);
-	}
+	pages = __i915_gem_object_unset_pages(obj);
 
 	obj->ops = &i915_gem_phys_ops;
 
@@ -6040,7 +6042,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 
 err_xfer:
 	obj->ops = &i915_gem_object_ops;
-	obj->mm.pages = pages;
+	if (!IS_ERR(pages)) {
+		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	}
 err_unlock:
 	mutex_unlock(&obj->mm.lock);
 	return err;