
[v3,04/37] drm/i915/region: support contiguous allocations

Message ID 20190809222643.23142-5-matthew.auld@intel.com
State New, archived
Series Introduce memory region concept (including device local memory)

Commit Message

Matthew Auld Aug. 9, 2019, 10:26 p.m. UTC
Some objects may need to be allocated as a contiguous block of memory;
thinking ahead, the various kernel io_mapping interfaces seem to expect it.
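
For illustration, a minimal usage sketch of the flag added here, built only
from the interfaces in this patch (mem stands in for some
intel_memory_region, and SZ_2M is an arbitrary example size):

	struct drm_i915_gem_object *obj;
	int err;

	/* Request physically contiguous backing store from the region. */
	obj = i915_gem_object_create_region(mem, SZ_2M,
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* The backing blocks are only allocated once the pages are pinned. */
	err = i915_gem_object_pin_pages(obj);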

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c    |  10 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h    |   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 152 +++++++++++++++++-
 drivers/gpu/drm/i915/selftests/mock_region.c  |   5 +-
 5 files changed, 166 insertions(+), 8 deletions(-)

Comments

Chris Wilson Aug. 10, 2019, 10:22 a.m. UTC | #1
Quoting Matthew Auld (2019-08-09 23:26:10)
> Some objects may need to be allocated as a contiguous block of memory;
> thinking ahead, the various kernel io_mapping interfaces seem to expect it.

But we could always use scattergather over top...
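
For context, "scattergather over top" means presenting the discontiguous
backing blocks through a single sg_table, so consumers never rely on
physical contiguity. A rough sketch, where nblocks, block_pages[] and
block_len[] are hypothetical stand-ins for the region's buddy blocks:

	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (sg_alloc_table(&st, nblocks, GFP_KERNEL))
		return -ENOMEM;

	/* One sg entry per discontiguous block; the table is the single
	 * handle consumers iterate over. */
	for_each_sg(st.sgl, sg, nblocks, i)
		sg_set_page(sg, block_pages[i], block_len[i], 0);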

> @@ -98,10 +101,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
>  }
>  
>  void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
> -                                       struct intel_memory_region *mem)
> +                                       struct intel_memory_region *mem,
> +                                       unsigned long flags)
>  {
>         INIT_LIST_HEAD(&obj->mm.blocks);
>         obj->mm.region = mem;
> +       obj->flags = flags;
>  
>         mutex_lock(&mem->obj_lock);
>         list_add(&obj->mm.region_link, &mem->objects);
> @@ -125,6 +130,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
>         if (!mem)
>                 return ERR_PTR(-ENODEV);
>  
> +       if (flags & ~I915_BO_ALLOC_FLAGS)
> +               return ERR_PTR(-EINVAL);

This is a programmer error, not a user error.

> +
>         size = round_up(size, mem->min_page_size);
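
The convention Chris is pointing at: flags here come from in-kernel callers,
not userspace, so an invalid value is a driver bug rather than something to
report back. A sketch of the implied alternative (assuming the suggestion is
to assert instead of returning an error):

	-	if (flags & ~I915_BO_ALLOC_FLAGS)
	-		return ERR_PTR(-EINVAL);
	+	/* flags come from in-kernel callers only; misuse is a bug. */
	+	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);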
Daniel Vetter Aug. 13, 2019, 7:17 p.m. UTC | #2
On Fri, Aug 09, 2019 at 11:26:10PM +0100, Matthew Auld wrote:
> Some objects may need to be allocated as a contiguous block of memory;
> thinking ahead, the various kernel io_mapping interfaces seem to expect it.

Not really, we can vmalloc for iomappings too.
-Daniel
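
For context, contiguity is not strictly required for a kernel mapping:
discontiguous pages can be stitched into one contiguous kernel virtual
range. A minimal sketch, where pages and npages are placeholders for the
object's backing pages:

	struct page **pages;	/* placeholder: the object's backing pages */
	void *vaddr;

	/*
	 * Map discontiguous pages into a contiguous kernel VA range; for
	 * WC access something like pgprot_writecombine(PAGE_KERNEL) would
	 * be used instead.
	 */
	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... access through vaddr ... */

	vunmap(vaddr);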


Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5e2fa37e9bc0..eb92243d473b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -116,6 +116,10 @@  struct drm_i915_gem_object {
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
+	unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
 	/*
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index be126e70c90f..d9cd722b5dbf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -42,6 +42,9 @@  i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		return -ENOMEM;
 	}
 
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags = I915_ALLOC_CONTIGUOUS;
+
 	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
 	if (ret)
 		goto err_free_sg;
@@ -98,10 +101,12 @@  i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem)
+					struct intel_memory_region *mem,
+					unsigned long flags)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = mem;
+	obj->flags = flags;
 
 	mutex_lock(&mem->obj_lock);
 	list_add(&obj->mm.region_link, &mem->objects);
@@ -125,6 +130,9 @@  i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
+	if (flags & ~I915_BO_ALLOC_FLAGS)
+		return ERR_PTR(-EINVAL);
+
 	size = round_up(size, mem->min_page_size);
 
 	GEM_BUG_ON(!size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@  void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem);
+					struct intel_memory_region *mem,
+					unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2f13e4c1d999..70b467d4e811 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -81,17 +81,17 @@  static int igt_mock_fill(void *arg)
 
 static void igt_mark_evictable(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_unpin_pages(obj);
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
 	obj->mm.madv = I915_MADV_DONTNEED;
 	list_move(&obj->mm.region_link, &obj->mm.region->purgeable);
 }
 
-static int igt_mock_shrink(void *arg)
+static int igt_frag_region(struct intel_memory_region *mem,
+			   struct list_head *objects)
 {
-	struct intel_memory_region *mem = arg;
 	struct drm_i915_gem_object *obj;
 	unsigned long n_objects;
-	LIST_HEAD(objects);
 	resource_size_t target;
 	resource_size_t total;
 	int err = 0;
@@ -109,7 +109,7 @@  static int igt_mock_shrink(void *arg)
 			goto err_close_objects;
 		}
 
-		list_add(&obj->st_link, &objects);
+		list_add(&obj->st_link, objects);
 
 		err = i915_gem_object_pin_pages(obj);
 		if (err)
@@ -123,6 +123,39 @@  static int igt_mock_shrink(void *arg)
 			igt_mark_evictable(obj);
 	}
 
+	return 0;
+
+err_close_objects:
+	close_objects(objects);
+	return err;
+}
+
+static void igt_defrag_region(struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, st_link) {
+		if (obj->mm.madv == I915_MADV_WILLNEED)
+			igt_mark_evictable(obj);
+	}
+}
+
+static int igt_mock_shrink(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = mem->mm.chunk_size;
+
 	while (target <= total / 2) {
 		obj = i915_gem_object_create_region(mem, target, 0);
 		if (IS_ERR(obj)) {
@@ -154,11 +187,120 @@  static int igt_mock_shrink(void *arg)
 	return err;
 }
 
+static int igt_mock_contiguous(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = total / 2;
+
+	/*
+	 * Sanity check that we can allocate all of the available fragmented
+	 * space.
+	 */
+	obj = i915_gem_object_create_region(mem, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate available space\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	/* Try the smallest possible size -- should succeed */
+	obj = i915_gem_object_create_region(mem, mem->mm.chunk_size,
+					    I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate smallest possible size\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("[1] object spans multiple sg entries\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	/*
+	 * Even though there is enough free space for the allocation, we
+	 * shouldn't be able to allocate it, given that the free space is
+	 * fragmented and non-contiguous.
+	 */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		pr_err("expected allocation to fail\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_defrag_region(&objects);
+
+	/* Should now succeed */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate from defragged area\n");
+		goto err_close_objects;
+	}
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("object spans multiple sg entries\n");
+		err = -EINVAL;
+	}
+
+err_close_objects:
+	close_objects(&objects);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_shrink),
+		SUBTEST(igt_mock_contiguous),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index cc97250dca62..d73f37712c44 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -23,6 +23,9 @@  mock_object_create(struct intel_memory_region *mem,
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 
+	if (flags & I915_BO_ALLOC_CONTIGUOUS)
+		size = roundup_pow_of_two(size);
+
 	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
 		return ERR_PTR(-E2BIG);
 
@@ -38,7 +41,7 @@  mock_object_create(struct intel_memory_region *mem,
 	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
-	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_init_memory_region(obj, mem, flags);
 
 	return obj;
 }
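
A closing note on the mock_region hunk: a buddy allocator hands out
power-of-two blocks, so a single contiguous allocation has to round up
before the max-order check. A worked sketch with illustrative values
(chunk_size = 4KiB, max_order = 16):

	/* Largest single block, and hence largest contiguous object:
	 * BIT(16) * SZ_4K = 256MiB. */
	resource_size_t max = BIT(16) * SZ_4K;

	/* A 40MiB request rounds up to the next power of two, 64MiB. */
	resource_size_t size = roundup_pow_of_two(40 * SZ_1M);

	if (size > max)		/* 64MiB <= 256MiB, so this passes */
		return ERR_PTR(-E2BIG);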