
[17/25] drm/i915/selftests: Fill all the drm_vma_manager holes

Message ID 20191110185806.17413-17-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series: [01/25] drm/i915: Protect context while grabbing its name for the request

Commit Message

Chris Wilson Nov. 10, 2019, 6:57 p.m. UTC
To test mmap_offset_exhaustion, we first have to fill the entire vma
manager, leaving only a single page free. Don't assume that the vma
manager is not already fragmented; fill all the holes instead.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 .../drm/i915/gem/selftests/i915_gem_mman.c    | 45 ++++++++++++++-----
 1 file changed, 33 insertions(+), 12 deletions(-)

Comments

Matthew Auld Nov. 11, 2019, 12:01 p.m. UTC | #1
On 10/11/2019 18:57, Chris Wilson wrote:
> To test mmap_offset_exhaustion, we first have to fill the entire vma
> manager, leaving only a single page free. Don't assume that the vma
> manager is not already fragmented; fill all the holes instead.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>

Why is there anything in vm_addr_space_mm at this point? Just leftovers 
from a previous test which have yet to be cleaned up?

> ---
>   .../drm/i915/gem/selftests/i915_gem_mman.c    | 45 ++++++++++++++-----
>   1 file changed, 33 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index cabf45b1403f..b307574e3f3b 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -607,28 +607,43 @@ static int igt_mmap_offset_exhaustion(void *arg)
>   	struct drm_i915_private *i915 = arg;
>   	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
>   	struct drm_i915_gem_object *obj;
> -	struct drm_mm_node resv, *hole;
> -	u64 hole_start, hole_end;
> +	struct drm_mm_node *hole, *next;
>   	int loop, err;
>   
>   	/* Disable background reaper */
>   	disable_retire_worker(i915);
>   	GEM_BUG_ON(!i915->gt.awake);
> +	intel_gt_retire_requests(&i915->gt);
> +	i915_gem_drain_freed_objects(i915);
>   
>   	/* Trim the device mmap space to only a page */
> -	memset(&resv, 0, sizeof(resv));
> -	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
> -		resv.start = hole_start;
> -		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
> -		mmap_offset_lock(i915);
> -		err = drm_mm_reserve_node(mm, &resv);
> -		mmap_offset_unlock(i915);
> +	mmap_offset_lock(i915);
> +	loop = 1; /* PAGE_SIZE units */
> +	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
> +		struct drm_mm_node *resv;
> +
> +		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
> +		if (!resv) {
> +			err = -ENOMEM;
> +			goto out_park;
> +		}
> +
> +		resv->start = drm_mm_hole_node_start(hole) + loop;
> +		resv->size = hole->hole_size - loop;
> +		resv->color = -1ul;
> +		loop = 0;

Praying that there isn't a one-page hole at the start.
Reviewed-by: Matthew Auld <matthew.auld@intel.com>

> +
> +		pr_debug("Reserving hole [%llx + %llx]\n",
> +			 resv->start, resv->size);
> +
> +		err = drm_mm_reserve_node(mm, resv);
>   		if (err) {
>   			pr_err("Failed to trim VMA manager, err=%d\n", err);

kfree(resv);

>   			goto out_park;
>   		}
> -		break;
>   	}
> +	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
> +	mmap_offset_unlock(i915);
>   
>   	/* Just fits! */
>   	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
> @@ -685,9 +700,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
>   
>   out:
>   	mmap_offset_lock(i915);
> -	drm_mm_remove_node(&resv);
> -	mmap_offset_unlock(i915);
>   out_park:
> +	drm_mm_for_each_node_safe(hole, next, mm) {
> +		if (hole->color != -1ul)
> +			continue;
> +
> +		drm_mm_remove_node(hole);
> +		kfree(hole);
> +	}
> +	mmap_offset_unlock(i915);
>   	restore_retire_worker(i915);
>   	return err;
>   err_obj:
>
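To illustrate the inline "kfree(resv);" note above: when drm_mm_reserve_node()
fails, the freshly allocated node was never inserted into the mm, so the
cleanup loop at out_park (which only walks inserted nodes) cannot free it and
the allocation leaks. A minimal sketch of that error path with the reviewer's
suggestion applied -- an assumption about the fix, not necessarily what was
committed:

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv); /* never inserted, so out_park won't free it */
			goto out_park;
		}
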
Chris Wilson Nov. 11, 2019, 12:09 p.m. UTC | #2
Quoting Matthew Auld (2019-11-11 12:01:54)
> On 10/11/2019 18:57, Chris Wilson wrote:
> > To test mmap_offset_exhaustion, we first have to fill the entire vma
> > manager, leaving only a single page free. Don't assume that the vma
> > manager is not already fragmented; fill all the holes instead.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Matthew Auld <matthew.auld@intel.com>
> 
> Why is there anything in vm_addr_space_mm at this point? Just leftovers 
> from a previous test which have yet to be cleaned up?

Looks at Matthew, yes why is there anything here? :-p

If it helps, the issue is only on lmem and goes away after flushing all
the requests -- so some object we used during GT init had an
mmap_offset.

> > ---
> >   .../drm/i915/gem/selftests/i915_gem_mman.c    | 45 ++++++++++++++-----
> >   1 file changed, 33 insertions(+), 12 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> > index cabf45b1403f..b307574e3f3b 100644
> > --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> > +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> > @@ -607,28 +607,43 @@ static int igt_mmap_offset_exhaustion(void *arg)
> >       struct drm_i915_private *i915 = arg;
> >       struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
> >       struct drm_i915_gem_object *obj;
> > -     struct drm_mm_node resv, *hole;
> > -     u64 hole_start, hole_end;
> > +     struct drm_mm_node *hole, *next;
> >       int loop, err;
> >   
> >       /* Disable background reaper */
> >       disable_retire_worker(i915);
> >       GEM_BUG_ON(!i915->gt.awake);
> > +     intel_gt_retire_requests(&i915->gt);
> > +     i915_gem_drain_freed_objects(i915);
> >   
> >       /* Trim the device mmap space to only a page */
> > -     memset(&resv, 0, sizeof(resv));
> > -     drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
> > -             resv.start = hole_start;
> > -             resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
> > -             mmap_offset_lock(i915);
> > -             err = drm_mm_reserve_node(mm, &resv);
> > -             mmap_offset_unlock(i915);
> > +     mmap_offset_lock(i915);
> > +     loop = 1; /* PAGE_SIZE units */
> > +     list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
> > +             struct drm_mm_node *resv;
> > +
> > +             resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
> > +             if (!resv) {
> > +                     err = -ENOMEM;
> > +                     goto out_park;
> > +             }
> > +
> > +             resv->start = drm_mm_hole_node_start(hole) + loop;
> > +             resv->size = hole->hole_size - loop;
> > +             resv->color = -1ul;
> > +             loop = 0;
> 
> Praying that there isn't a one-page hole at the start.

True, unlikely but easy enough to skip.
-Chris
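
One possible shape for the "easy enough to skip" guard mentioned above -- a
sketch only, placed at the top of the hole_stack loop body before the
kzalloc(), and not necessarily the change that landed: if the very first hole
is already just a single page, leave it alone as the surviving free page and
reserve every later hole in full.

		/* Sketch: a one-page hole at the start can itself serve as
		 * the single free page, so skip it rather than reserving a
		 * zero-sized node.
		 */
		if (loop && hole->hole_size <= loop) {
			loop = 0;
			continue;
		}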

Patch

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index cabf45b1403f..b307574e3f3b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -607,28 +607,43 @@  static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
-	struct drm_mm_node resv, *hole;
-	u64 hole_start, hole_end;
+	struct drm_mm_node *hole, *next;
 	int loop, err;
 
 	/* Disable background reaper */
 	disable_retire_worker(i915);
 	GEM_BUG_ON(!i915->gt.awake);
+	intel_gt_retire_requests(&i915->gt);
+	i915_gem_drain_freed_objects(i915);
 
 	/* Trim the device mmap space to only a page */
-	memset(&resv, 0, sizeof(resv));
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		resv.start = hole_start;
-		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
-		mmap_offset_lock(i915);
-		err = drm_mm_reserve_node(mm, &resv);
-		mmap_offset_unlock(i915);
+	mmap_offset_lock(i915);
+	loop = 1; /* PAGE_SIZE units */
+	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
+		struct drm_mm_node *resv;
+
+		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
+		if (!resv) {
+			err = -ENOMEM;
+			goto out_park;
+		}
+
+		resv->start = drm_mm_hole_node_start(hole) + loop;
+		resv->size = hole->hole_size - loop;
+		resv->color = -1ul;
+		loop = 0;
+
+		pr_debug("Reserving hole [%llx + %llx]\n",
+			 resv->start, resv->size);
+
+		err = drm_mm_reserve_node(mm, resv);
 		if (err) {
 			pr_err("Failed to trim VMA manager, err=%d\n", err);
 			goto out_park;
 		}
-		break;
 	}
+	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
+	mmap_offset_unlock(i915);
 
 	/* Just fits! */
 	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
@@ -685,9 +700,15 @@  static int igt_mmap_offset_exhaustion(void *arg)
 
 out:
 	mmap_offset_lock(i915);
-	drm_mm_remove_node(&resv);
-	mmap_offset_unlock(i915);
 out_park:
+	drm_mm_for_each_node_safe(hole, next, mm) {
+		if (hole->color != -1ul)
+			continue;
+
+		drm_mm_remove_node(hole);
+		kfree(hole);
+	}
+	mmap_offset_unlock(i915);
 	restore_retire_worker(i915);
 	return err;
 err_obj: