
[7/7] drm/i915/gem: Migrate to system at dma-buf attach time (v6)

Message ID 20210716141426.1904528-8-jason@jlekstrand.net (mailing list archive)
State New, archived
Series drm/i915: Migrate memory to SMEM when imported cross-device (v7)

Commit Message

Jason Ekstrand July 16, 2021, 2:14 p.m. UTC
From: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Until we support p2p DMA, or as a complement to it, migrate data
to system memory at dma-buf attach time if possible.

v2:
- Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
  selftest to migrate if we are LMEM capable.
v3:
- Migrate also in the pin() callback.
v4:
- Migrate in attach
v5: (jason)
- Lock around the migration
v6: (jason)
- Move the can_migrate check outside the lock
- Rework the selftests to test more migration conditions.  In
  particular, SMEM, LMEM, and LMEM+SMEM are all checked.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
---
 drivers/gpu/drm/i915/gem/i915_gem_create.c    |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    | 23 ++++-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  4 +
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 89 ++++++++++++++++++-
 4 files changed, 112 insertions(+), 6 deletions(-)

Comments

Matthew Auld July 20, 2021, 10:53 a.m. UTC | #1
On Fri, 16 Jul 2021 at 15:14, Jason Ekstrand <jason@jlekstrand.net> wrote:
>
> From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>
> Until we support p2p DMA, or as a complement to it, migrate data
> to system memory at dma-buf attach time if possible.
>
> v2:
> - Rebase on dynamic exporter. Update the igt_dmabuf_import_same_driver
>   selftest to migrate if we are LMEM capable.
> v3:
> - Migrate also in the pin() callback.
> v4:
> - Migrate in attach
> v5: (jason)
> - Lock around the migration
> v6: (jason)
> - Move the can_migrate check outside the lock
> - Rework the selftests to test more migration conditions.  In
>   particular, SMEM, LMEM, and LMEM+SMEM are all checked.
>
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
> Reported-by: kernel test robot <lkp@intel.com>
> Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
> Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_create.c    |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    | 23 ++++-
>  drivers/gpu/drm/i915/gem/i915_gem_object.h    |  4 +
>  .../drm/i915/gem/selftests/i915_gem_dmabuf.c  | 89 ++++++++++++++++++-
>  4 files changed, 112 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> index 039e4f3b39c79..41c4cd3e1ea01 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> @@ -82,7 +82,7 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj,
>         return 0;
>  }
>
> -static struct drm_i915_gem_object *
> +struct drm_i915_gem_object *
>  i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
>                             struct intel_memory_region **placements,
>                             unsigned int n_placements)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> index 9a655f69a0671..5d438b95826b9 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> @@ -170,8 +170,29 @@ static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
>                                   struct dma_buf_attachment *attach)
>  {
>         struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
> +       struct i915_gem_ww_ctx ww;
> +       int err;
> +
> +       if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
> +               return -EOPNOTSUPP;
> +
> +       for_i915_gem_ww(&ww, err, true) {
> +               err = i915_gem_object_lock(obj, &ww);
> +               if (err)
> +                       continue;
> +
> +               err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
> +               if (err)
> +                       continue;
>
> -       return i915_gem_object_pin_pages_unlocked(obj);
> +               err = i915_gem_object_wait_migration(obj, 0);
> +               if (err)
> +                       continue;
> +
> +               err = i915_gem_object_pin_pages(obj);
> +       }
> +
> +       return err;
>  }
>
>  static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 8be4fadeee487..fbae53bd46384 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -61,6 +61,10 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
>  struct drm_i915_gem_object *
>  i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
>                                        const void *data, resource_size_t size);
> +struct drm_i915_gem_object *
> +i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
> +                           struct intel_memory_region **placements,
> +                           unsigned int n_placements);
>
>  extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> index 4451bbb4917e4..7b7647e7e220a 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
> @@ -85,9 +85,62 @@ static int igt_dmabuf_import_self(void *arg)
>         return err;
>  }
>
> -static int igt_dmabuf_import_same_driver(void *arg)
> +static int igt_dmabuf_import_same_driver_lmem(void *arg)
>  {
>         struct drm_i915_private *i915 = arg;
> +       struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
> +       struct drm_i915_gem_object *obj;
> +       struct drm_gem_object *import;
> +       struct dma_buf *dmabuf;
> +       int err;
> +
> +       if (!i915->mm.regions[INTEL_REGION_LMEM])

!lmem

> +               return 0;
> +
> +       force_different_devices = true;
> +
> +       obj = i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
> +       if (IS_ERR(obj)) {
> +               pr_err("i915_gem_object_create_user failed with err=%d\n",
> +                      (int)PTR_ERR(dmabuf));

PTR_ERR(obj)

> +               err = PTR_ERR(obj);
> +               goto out_ret;
> +       }
> +
> +       dmabuf = i915_gem_prime_export(&obj->base, 0);
> +       if (IS_ERR(dmabuf)) {
> +               pr_err("i915_gem_prime_export failed with err=%d\n",
> +                      (int)PTR_ERR(dmabuf));
> +               err = PTR_ERR(dmabuf);
> +               goto out;
> +       }
> +
> +       /* We expect an import of an LMEM-only object to fail with
> +        * -EOPNOTSUPP because it can't be migrated to SMEM.
> +        */

/*
 * We expect...
 */

> +       import = i915_gem_prime_import(&i915->drm, dmabuf);
> +       if (!IS_ERR(import)) {
> +               drm_gem_object_put(import);
> +               pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
> +               err = -EINVAL;
> +       } else if (PTR_ERR(import) != -EOPNOTSUPP) {
> +               pr_err("i915_gem_prime_import failed with the wrong err=%d\n",
> +                      (int)PTR_ERR(import));
> +               err = PTR_ERR(import);
> +       }
> +
> +       dma_buf_put(dmabuf);
> +out:
> +       i915_gem_object_put(obj);
> +out_ret:
> +       force_different_devices = false;
> +       return err;
> +}
> +
> +static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
> +                                        struct intel_memory_region **regions,
> +                                        unsigned int num_regions)
> +{
>         struct drm_i915_gem_object *obj, *import_obj;
>         struct drm_gem_object *import;
>         struct dma_buf *dmabuf;
> @@ -97,9 +150,15 @@ static int igt_dmabuf_import_same_driver(void *arg)
>         int err;
>
>         force_different_devices = true;
> -       obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
> -       if (IS_ERR(obj))
> +
> +       obj = i915_gem_object_create_user(i915, PAGE_SIZE,
> +                                         regions, num_regions);
> +       if (IS_ERR(obj)) {
> +               pr_err("i915_gem_object_create_user failed with err=%d\n",
> +                      (int)PTR_ERR(dmabuf));

PTR_ERR(obj)

> +               err = PTR_ERR(obj);
>                 goto out_ret;
> +       }
>
>         dmabuf = i915_gem_prime_export(&obj->base, 0);
>         if (IS_ERR(dmabuf)) {
> @@ -174,6 +233,26 @@ static int igt_dmabuf_import_same_driver(void *arg)
>         return err;
>  }
>
> +static int igt_dmabuf_import_same_driver_smem(void *arg)
> +{
> +       struct drm_i915_private *i915 = arg;
> +       struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];

Newline.

> +       return igt_dmabuf_import_same_driver(i915, &smem, 1);
> +}
> +
> +static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
> +{
> +       struct drm_i915_private *i915 = arg;
> +       struct intel_memory_region *regions[2];
> +
> +       if (!i915->mm.regions[INTEL_REGION_LMEM])
> +               return 0;
> +
> +       regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
> +       regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
> +       return igt_dmabuf_import_same_driver(i915, regions, 2);
> +}
> +
>  static int igt_dmabuf_import(void *arg)
>  {
>         struct drm_i915_private *i915 = arg;
> @@ -384,7 +463,9 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
>  {
>         static const struct i915_subtest tests[] = {
>                 SUBTEST(igt_dmabuf_export),
> -               SUBTEST(igt_dmabuf_import_same_driver),
> +               SUBTEST(igt_dmabuf_import_same_driver_lmem),
> +               SUBTEST(igt_dmabuf_import_same_driver_smem),
> +               SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
>         };
>
>         return i915_subtests(tests, i915);
> --
> 2.31.1
>
Jason Ekstrand July 20, 2021, 9:40 p.m. UTC | #2
Fixed all the nits below locally.  It'll be in the next send.

On Tue, Jul 20, 2021 at 5:53 AM Matthew Auld
<matthew.william.auld@gmail.com> wrote:
> [snip]

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 039e4f3b39c79..41c4cd3e1ea01 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -82,7 +82,7 @@  static int i915_gem_publish(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
 i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
 			    struct intel_memory_region **placements,
 			    unsigned int n_placements)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9a655f69a0671..5d438b95826b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -170,8 +170,29 @@  static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
 				  struct dma_buf_attachment *attach)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+	struct i915_gem_ww_ctx ww;
+	int err;
+
+	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+		return -EOPNOTSUPP;
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
+		if (err)
+			continue;
 
-	return i915_gem_object_pin_pages_unlocked(obj);
+		err = i915_gem_object_wait_migration(obj, 0);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+	}
+
+	return err;
 }
 
 static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 8be4fadeee487..fbae53bd46384 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -61,6 +61,10 @@  i915_gem_object_create_shmem(struct drm_i915_private *i915,
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
 				       const void *data, resource_size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+			    struct intel_memory_region **placements,
+			    unsigned int n_placements);
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 4451bbb4917e4..7b7647e7e220a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -85,9 +85,62 @@  static int igt_dmabuf_import_self(void *arg)
 	return err;
 }
 
-static int igt_dmabuf_import_same_driver(void *arg)
+static int igt_dmabuf_import_same_driver_lmem(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
+	struct drm_i915_gem_object *obj;
+	struct drm_gem_object *import;
+	struct dma_buf *dmabuf;
+	int err;
+
+	if (!i915->mm.regions[INTEL_REGION_LMEM])
+		return 0;
+
+	force_different_devices = true;
+
+	obj = i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
+	if (IS_ERR(obj)) {
+		pr_err("i915_gem_object_create_user failed with err=%d\n",
+		       (int)PTR_ERR(dmabuf));
+		err = PTR_ERR(obj);
+		goto out_ret;
+	}
+
+	dmabuf = i915_gem_prime_export(&obj->base, 0);
+	if (IS_ERR(dmabuf)) {
+		pr_err("i915_gem_prime_export failed with err=%d\n",
+		       (int)PTR_ERR(dmabuf));
+		err = PTR_ERR(dmabuf);
+		goto out;
+	}
+
+	/* We expect an import of an LMEM-only object to fail with
+	 * -EOPNOTSUPP because it can't be migrated to SMEM.
+	 */
+	import = i915_gem_prime_import(&i915->drm, dmabuf);
+	if (!IS_ERR(import)) {
+		drm_gem_object_put(import);
+		pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
+		err = -EINVAL;
+	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
+		pr_err("i915_gem_prime_import failed with the wrong err=%d\n",
+		       (int)PTR_ERR(import));
+		err = PTR_ERR(import);
+	}
+
+	dma_buf_put(dmabuf);
+out:
+	i915_gem_object_put(obj);
+out_ret:
+	force_different_devices = false;
+	return err;
+}
+
+static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
+					 struct intel_memory_region **regions,
+					 unsigned int num_regions)
+{
 	struct drm_i915_gem_object *obj, *import_obj;
 	struct drm_gem_object *import;
 	struct dma_buf *dmabuf;
@@ -97,9 +150,15 @@  static int igt_dmabuf_import_same_driver(void *arg)
 	int err;
 
 	force_different_devices = true;
-	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
-	if (IS_ERR(obj))
+
+	obj = i915_gem_object_create_user(i915, PAGE_SIZE,
+					  regions, num_regions);
+	if (IS_ERR(obj)) {
+		pr_err("i915_gem_object_create_user failed with err=%d\n",
+		       (int)PTR_ERR(dmabuf));
+		err = PTR_ERR(obj);
 		goto out_ret;
+	}
 
 	dmabuf = i915_gem_prime_export(&obj->base, 0);
 	if (IS_ERR(dmabuf)) {
@@ -174,6 +233,26 @@  static int igt_dmabuf_import_same_driver(void *arg)
 	return err;
 }
 
+static int igt_dmabuf_import_same_driver_smem(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];
+	return igt_dmabuf_import_same_driver(i915, &smem, 1);
+}
+
+static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_memory_region *regions[2];
+
+	if (!i915->mm.regions[INTEL_REGION_LMEM])
+		return 0;
+
+	regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
+	regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
+	return igt_dmabuf_import_same_driver(i915, regions, 2);
+}
+
 static int igt_dmabuf_import(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -384,7 +463,9 @@  int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_dmabuf_export),
-		SUBTEST(igt_dmabuf_import_same_driver),
+		SUBTEST(igt_dmabuf_import_same_driver_lmem),
+		SUBTEST(igt_dmabuf_import_same_driver_smem),
+		SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
 	};
 
 	return i915_subtests(tests, i915);