[v3,2/5] drm/i915/gem: Introduce a selftest for the gem object migrate functionality

Message ID 20210628144626.76126-3-thomas.hellstrom@linux.intel.com (mailing list archive)
State New, archived
Series: drm/i915/gem: Introduce a migrate interface

Commit Message

Thomas Hellström June 28, 2021, 2:46 p.m. UTC
From: Matthew Auld <matthew.auld@intel.com>

A selftest for the gem object migrate functionality, slightly adapted
from Matthew's original to the new interface and the new fill-blit
code.

Co-developed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 +
 .../drm/i915/gem/selftests/i915_gem_migrate.c | 235 ++++++++++++++++++
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 3 files changed, 237 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
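
The new file is wired up as the "gem_migrate" live selftest via the
i915_live_selftests.h hunk. With CONFIG_DRM_I915_SELFTEST enabled, live
selftests are normally driven through the i915.live_selftests module
parameter (or IGT's i915_selftest wrapper); that is the usual selftest
plumbing rather than anything this patch adds. The test bodies below all
use the for_i915_gem_ww() locking loop, so as a rough orientation, here
is an illustrative open-coding of that idiom. It is a sketch only, not
part of the patch, and assumes the stock i915_gem_ww_ctx_init(),
i915_gem_ww_ctx_backoff() and i915_gem_ww_ctx_fini() helpers:

static int example_ww_loop(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true); /* true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* locked work goes here; it may also return -EDEADLK */
	}
	if (err == -EDEADLK) {
		/* drop contended locks, then retry the whole body */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}

Inside a for_i915_gem_ww() body, "continue" after an error plays the
role of the -EDEADLK check above: the loop backs off and retries on
deadlock, and any other error simply ends the loop.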

Comments

Matthew Auld June 28, 2021, 3:51 p.m. UTC | #1
On 28/06/2021 15:46, Thomas Hellström wrote:
> From: Matthew Auld <matthew.auld@intel.com>
> 
> A selftest for the gem object migrate functionality, slightly adapted
> from Matthew's original to the new interface and the new fill-blit
> code.

[snip]

> +int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
> +{
> +	static const struct i915_subtest tests[] = {
> +		SUBTEST(igt_smem_create_migrate),
> +		SUBTEST(igt_lmem_create_migrate),
> +		SUBTEST(igt_lmem_pages_migrate),

Might be good to dirty the pages before the migration, and then verify 
they still match at the end?
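
A minimal sketch of what such a fill-and-check helper could look like,
assuming a CPU mapping via i915_gem_object_pin_map() is acceptable for
both backing stores here. Illustrative only; igt_fill_check_buffer() is
a placeholder name, not something this patch adds:

static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 u32 val, bool write)
{
	unsigned long n = obj->base.size >> 2; /* object size in u32s */
	u32 *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (write) {
		/* dirty the pages with a known pattern */
		memset32(vaddr, val, n);
	} else {
		/* verify the pattern survived the migration */
		while (n--) {
			if (vaddr[n] != val) {
				pr_err("vaddr[%lu] = %x, expected %x\n",
				       n, vaddr[n], val);
				err = -EINVAL;
				break;
			}
		}
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

igt_lmem_pages_migrate() could then write the pattern once before the
migration loop and verify it after each pass, instead of (or as well
as) the intel_migrate_clear() calls.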

Michael J. Ruhl June 28, 2021, 6:53 p.m. UTC | #2
>-----Original Message-----
>From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of
>Thomas Hellström
>Subject: [Intel-gfx] [PATCH v3 2/5] drm/i915/gem: Introduce a selftest for the
>gem object migrate functionality

[snip]

>+	if (i915_gem_object_is_lmem(obj)) {
>+		if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) {

I don't see any testing of the parameter num_allowed.

Is that done somewhere else?

Mike

Thomas Hellström June 28, 2021, 7:14 p.m. UTC | #3
On Mon, 2021-06-28 at 18:53 +0000, Ruhl, Michael J wrote:
> > [snip]
> > 
> > +       if (i915_gem_object_is_lmem(obj)) {
> > +               if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) {
> 
> I don't see any testing of the parameter num_allowed.
> 
> Is that done somewhere else?
> 
> Mike

That's a userspace-supplied parameter only, dictating which regions the
object is allowed in when bound to the GPU. It's not exercised in this
selftest.

Thanks,
Thomas
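
As an aside for readers following along, here is a toy sketch of how a
userspace-supplied placement list (the num_allowed count asked about
above) could gate a migration check. The struct and field names below
are invented for illustration and are not the actual i915 structures:

struct toy_object {
	const int *placements;    /* regions userspace allows */
	unsigned int num_allowed; /* 0: kernel-internal, no list */
};

static bool toy_can_migrate(const struct toy_object *obj, int region)
{
	unsigned int i;

	/* no userspace list (kernel-internal object): no restriction */
	if (!obj->num_allowed)
		return true;

	for (i = 0; i < obj->num_allowed; i++) {
		if (obj->placements[i] == region)
			return true;
	}

	return false;
}

That would be consistent with the kernel-created objects in this
selftest migrating freely: they carry no userspace placement list to
restrict them.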
Michael J. Ruhl June 28, 2021, 7:27 p.m. UTC | #4
>-----Original Message-----
>From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>Subject: Re: [Intel-gfx] [PATCH v3 2/5] drm/i915/gem: Introduce a selftest for
>the gem object migrate functionality

[snip]

>> I don't see any testing of the parameter num_allowed.
>>
>> Is that done somewhere else?
>>
>> Mike
>
>That's a userspace-supplied parameter only, dictating which regions the
>object is allowed in when bound to the GPU. It's not exercised in this
>selftest.

Got it.

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>

M

Thomas Hellström June 28, 2021, 7:32 p.m. UTC | #5
On 6/28/21 9:27 PM, Ruhl, Michael J wrote:
[snip]

>>> I don't see any testing of the parameter num_allowed.
>>>
>>> Is that done somewhere else?
>>>
>>> Mike
>> That's a userspace-supplied parameter only, dictating which regions the
>> object is allowed in when bound to the GPU. It's not exercised in this
>> selftest.
> Got it.
>
> Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
>
> M

Thanks!

Thomas


Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1c18be067b58..ff147fd59874 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -649,6 +649,7 @@  static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
 #include "selftests/huge_pages.c"
+#include "selftests/i915_gem_migrate.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
new file mode 100644
index 000000000000..a437b66f64d9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "gt/intel_migrate.h"
+
+static int igt_smem_create_migrate(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_ww_ctx ww;
+	int err = 0;
+
+	/* Switch object backing-store on create */
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) {
+			err = -EINVAL;
+			continue;
+		}
+
+		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			continue;
+
+		if (i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM))
+			err = -EINVAL;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_ww_ctx ww;
+	int err = 0;
+
+	/* Switch object backing-store on create */
+	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		if (!i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+			err = -EINVAL;
+			continue;
+		}
+
+		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			continue;
+
+		if (i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+			err = -EINVAL;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
+				  struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = i915_gem_object_lock(obj, ww);
+	if (err)
+		return err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_PRIORITY |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	if (i915_gem_object_is_lmem(obj)) {
+		if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) {
+			pr_err("object can't migrate to smem.\n");
+			return -EINVAL;
+		}
+
+		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
+		if (err) {
+			pr_err("Object failed migration to smem\n");
+			return err;
+		}
+
+		if (i915_gem_object_is_lmem(obj)) {
+			pr_err("object still backed by lmem\n");
+			err = -EINVAL;
+		}
+
+		if (!i915_gem_object_has_struct_page(obj)) {
+			pr_err("object not backed by struct page\n");
+			err = -EINVAL;
+		}
+
+	} else {
+		if (!i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+			pr_err("object can't migrate to lmem.\n");
+			return -EINVAL;
+		}
+
+		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
+		if (err) {
+			pr_err("Object failed migration to lmem\n");
+			return err;
+		}
+
+		if (i915_gem_object_has_struct_page(obj)) {
+			pr_err("object still backed by struct page\n");
+			err = -EINVAL;
+		}
+
+		if (!i915_gem_object_is_lmem(obj)) {
+			pr_err("object not backed by lmem\n");
+			err = -EINVAL;
+		}
+	}
+
+	return err;
+}
+
+static int igt_lmem_pages_migrate(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_ww_ctx ww;
+	struct i915_request *rq;
+	int err;
+	int i;
+
+	/* From LMEM to shmem and back again */
+
+	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_lock(obj, NULL);
+	if (err)
+		goto out_put;
+
+	err = ____i915_gem_object_get_pages(obj);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		goto out_put;
+	}
+
+	err = intel_context_migrate_clear(gt->migrate.context, NULL,
+					  obj->mm.pages->sgl, obj->cache_level,
+					  i915_gem_object_is_lmem(obj),
+					  0, &rq);
+	if (rq) {
+		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+		i915_request_put(rq);
+	}
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_put;
+
+	for (i = 1; i <= 4; ++i) {
+		for_i915_gem_ww(&ww, err, true) {
+			err = lmem_pages_migrate_one(&ww, obj);
+			if (err)
+				continue;
+
+			err = i915_gem_object_wait_migration(obj, true);
+			if (err)
+				continue;
+
+			err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+						  obj->mm.pages->sgl,
+						  obj->cache_level,
+						  i915_gem_object_is_lmem(obj),
+						  0xdeadbeaf, &rq);
+			if (rq) {
+				dma_resv_add_excl_fence(obj->base.resv,
+							&rq->fence);
+				i915_request_put(rq);
+			}
+		}
+		if (err)
+			break;
+	}
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_smem_create_migrate),
+		SUBTEST(igt_lmem_create_migrate),
+		SUBTEST(igt_lmem_pages_migrate),
+	};
+
+	if (!HAS_LMEM(i915))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a68197cf1044..e2fd1b61af71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -40,6 +40,7 @@  selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
+selftest(gem_migrate, i915_gem_migrate_live_selftests)
 selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)