
[RFC,092/162] drm/i915/uapi: introduce drm_i915_gem_create_ext

Message ID 20201127120718.454037-93-matthew.auld@intel.com (mailing list archive)
State New, archived
Series DG1 + LMEM enabling

Commit Message

Matthew Auld Nov. 27, 2020, 12:06 p.m. UTC
Same old gem_create, but now with extensions support. This is needed
to support various upcoming use cases. For now we use the extensions
mechanism to support setting an immutable priority list of potential
placements at creation time.

If we wish to set the placements/regions we can simply do:

struct drm_i915_gem_object_param region_param = { … }; /* Unchanged */
struct drm_i915_gem_create_ext_setparam setparam_region = {
    .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
    .param = region_param,
};

struct drm_i915_gem_create_ext create_ext = {
	.size = 16 * PAGE_SIZE,
	.extensions = (uintptr_t)&setparam_region,
};
int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
if (err) ...
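
For illustration, region_param itself could carry a priority-ordered
array of class:instance pairs along the lines of the sketch below. This
is only a sketch: the I915_MEMORY_CLASS_* names are assumed to come from
the memory-region query uapi earlier in this series, and the field usage
follows the drm_i915_gem_object_param definition added further down in
this patch.

struct drm_i915_gem_memory_class_instance regions[] = {
    { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
    { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
};
struct drm_i915_gem_object_param region_param = {
    .size = 2, /* number of entries in regions[] */
    .param = I915_OBJECT_PARAM | I915_PARAM_MEMORY_REGIONS,
    .data = (uintptr_t)regions,
};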

If we use the normal gem_create or gem_create_ext without the
extensions/placements then we still get the old behaviour with only
placing the object in system memory.

One important change here is that the returned size will now be rounded
up to the correct size, depending on the list of placements, since some
platforms have minimum page-size restrictions when dealing with device
local-memory.
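
For example, on a platform where device local-memory has a 64K minimum
page size (hypothetical numbers), a 4K create request that lists LMEM as
a potential placement would come back with size = 64K.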

Also, we still keep the i915_gem_object_setparam ioctl around, although
it is now restricted by the placement list (i.e. we are not allowed to
add new placements), and longer term it will go away with respect to
setting placements, since it was deemed that the kernel doesn't need to
support a dynamic list of placements, which is now solidified by this
uapi change.
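
For example, an object created with only a system-memory placement could
not later have a local-memory placement added through setparam; such a
call would be rejected.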

Testcase: igt/gem_create/create-ext-placement-sanity-check
Testcase: igt/gem_create/create-ext-placement-each
Testcase: igt/gem_create/create-ext-placement-all
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: CQ Tang <cq.tang@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/Makefile                 |   1 +
 drivers/gpu/drm/i915/gem/i915_gem_create.c    | 398 ++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   2 +
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c    |   4 +
 drivers/gpu/drm/i915/i915_drv.c               |   2 +-
 drivers/gpu/drm/i915/i915_gem.c               | 103 +----
 drivers/gpu/drm/i915/intel_memory_region.c    |  20 +
 drivers/gpu/drm/i915/intel_memory_region.h    |   4 +
 include/uapi/drm/i915_drm.h                   |  60 +++
 10 files changed, 500 insertions(+), 103 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_create.c

Comments

Chris Wilson Nov. 27, 2020, 1:25 p.m. UTC | #1
Quoting Matthew Auld (2020-11-27 12:06:08)
> Same old gem_create, but now with extensions support. This is needed
> to support various upcoming use cases. For now we use the extensions
> mechanism to support setting an immutable priority list of potential
> placements at creation time.
> 
> If we wish to set the placements/regions we can simply do:
> 
> struct drm_i915_gem_object_param region_param = { … }; /* Unchanged */
> struct drm_i915_gem_create_ext_setparam setparam_region = {
>     .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
>     .param = region_param,
> };
> 
> struct drm_i915_gem_create_ext create_ext = {
>         .size = 16 * PAGE_SIZE,
>         .extensions = (uintptr_t)&setparam_region,
> };
> int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
> if (err) ...
> 
> If we use the normal gem_create or gem_create_ext without the
> extensions/placements then we still get the old behaviour with only
> placing the object in system memory.
> 
> One important change here is that the returned size will now be rounded
> up to the correct size, depending on the list of placements, since some
> platforms have minimum page-size restrictions when dealing with device
> local-memory.
> 
> Also, we still keep the i915_gem_object_setparam ioctl around, although
> it is now restricted by the placement list (i.e. we are not allowed to
> add new placements), and longer term it will go away with respect to
> setting placements, since it was deemed that the kernel doesn't need to
> support a dynamic list of placements, which is now solidified by this
> uapi change.
> 
> Testcase: igt/gem_create/create-ext-placement-sanity-check
> Testcase: igt/gem_create/create-ext-placement-each
> Testcase: igt/gem_create/create-ext-placement-all
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Signed-off-by: CQ Tang <cq.tang@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/Makefile                 |   1 +
>  drivers/gpu/drm/i915/gem/i915_gem_create.c    | 398 ++++++++++++++++++
>  drivers/gpu/drm/i915/gem/i915_gem_object.c    |   2 +
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
>  drivers/gpu/drm/i915/gem/i915_gem_region.c    |   4 +
>  drivers/gpu/drm/i915/i915_drv.c               |   2 +-
>  drivers/gpu/drm/i915/i915_gem.c               | 103 +----
>  drivers/gpu/drm/i915/intel_memory_region.c    |  20 +
>  drivers/gpu/drm/i915/intel_memory_region.h    |   4 +
>  include/uapi/drm/i915_drm.h                   |  60 +++
>  10 files changed, 500 insertions(+), 103 deletions(-)
>  create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_create.c
> 
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index ec361d61230b..3955134feca7 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -134,6 +134,7 @@ gem-y += \
>         gem/i915_gem_clflush.o \
>         gem/i915_gem_client_blt.o \
>         gem/i915_gem_context.o \
> +       gem/i915_gem_create.o \
>         gem/i915_gem_dmabuf.o \
>         gem/i915_gem_domain.o \
>         gem/i915_gem_execbuffer.o \
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> new file mode 100644
> index 000000000000..6f6dd4f1ce7e
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
> @@ -0,0 +1,398 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2020 Intel Corporation
> + */
> +
> +#include "gem/i915_gem_ioctls.h"
> +#include "gem/i915_gem_lmem.h"
> +#include "gem/i915_gem_object_blt.h"
> +#include "gem/i915_gem_region.h"
> +
> +#include "i915_drv.h"
> +#include "i915_user_extensions.h"
> +
> +static u32 max_page_size(struct intel_memory_region **placements,
> +                        int n_placements)
> +{
> +       u32 max_page_size = 0;
> +       int i;
> +
> +       for (i = 0; i < n_placements; ++i) {
> +               max_page_size = max_t(u32, max_page_size,
> +                                     placements[i]->min_page_size);
> +       }
> +
> +       GEM_BUG_ON(!max_page_size);
> +       return max_page_size;
> +}
> +
> +static int
> +i915_gem_create(struct drm_file *file,
> +               struct intel_memory_region **placements,
> +               int n_placements,
> +               u64 *size_p,
> +               u32 *handle_p)
> +{
> +       struct drm_i915_gem_object *obj;
> +       u32 handle;
> +       u64 size;
> +       int ret;
> +
> +       size = round_up(*size_p, max_page_size(placements, n_placements));
> +       if (size == 0)
> +               return -EINVAL;
> +
> +       /* For most of the ABI (e.g. mmap) we think in system pages */
> +       GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
> +
> +       /* Allocate the new object */
> +       obj = i915_gem_object_create_region(placements[0], size, 0);
> +       if (IS_ERR(obj))
> +               return PTR_ERR(obj);
> +
> +       if (i915_gem_object_is_lmem(obj)) {
> +               struct intel_gt *gt = obj->mm.region->gt;
> +               struct intel_context *ce = gt->engine[BCS0]->blitter_context;
> +
> +               /*
> +                * XXX: We really want to move this to get_pages(), but we
> +                * require grabbing the BKL for the blitting operation which is
> +                * annoying. In the pipeline is support for async get_pages()
> +                * which should fit nicely for this. Also note that the actual
> +                * clear should be done async(we currently do an object_wait
> +                * which is pure garbage), we just need to take care if
> +                * userspace opts out of implicit sync for the execbuf, to avoid any
> +                * potential info leak.
> +                */

Not just XXX, but the design should be completed first.
-Chris
Chris Wilson Nov. 27, 2020, 7:21 p.m. UTC | #2
Quoting Matthew Auld (2020-11-27 12:06:08)
> +int
> +i915_gem_create_ioctl(struct drm_device *dev, void *data,
> +                     struct drm_file *file)
> +{
> +       struct drm_i915_private *i915 = to_i915(dev);
> +       struct create_ext ext_data = { .i915 = i915 };
> +       struct drm_i915_gem_create_ext *args = data;
> +       int ret;
> +
> +       i915_gem_flush_free_objects(i915);
> +
> +       ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
> +                                  create_extensions,
> +                                  ARRAY_SIZE(create_extensions),
> +                                  &ext_data);
> +       if (ret)
> +               goto err_free;
> +
> +       if (!ext_data.placements) {
> +               struct intel_memory_region **placements;
> +               enum intel_memory_type mem_type = INTEL_MEMORY_SYSTEM;
> +
> +               placements = kmalloc(sizeof(struct intel_memory_region *),
> +                                    GFP_KERNEL);
> +               if (!placements)
> +                       return -ENOMEM;
> +
> +               placements[0] = intel_memory_region_by_type(i915, mem_type);
> +
> +               ext_data.placements = placements;
> +               ext_data.n_placements = 1;
> +       }
> +
> +       ret = i915_gem_create(file,
> +                             ext_data.placements,
> +                             ext_data.n_placements,
> +                             &args->size, &args->handle);
> +       if (!ret)
> +               return 0;

Applying the extensions has to happen after creating the vanilla object.

It literally is the equivalent of applying the setparam ioctl to a fresh
object.

Look at the PXP series for how badly wrong this goes if you try it this
way around.
-Chris
Chris Wilson Dec. 1, 2020, 12:55 p.m. UTC | #3
Quoting Matthew Auld (2020-11-27 12:06:08)
> Same old gem_create, but now with extensions support. This is needed
> to support various upcoming use cases. For now we use the extensions
> mechanism to support setting an immutable priority list of potential
> placements at creation time.
> 
> If we wish to set the placements/regions we can simply do:
> 
> struct drm_i915_gem_object_param region_param = { … }; /* Unchanged */
> struct drm_i915_gem_create_ext_setparam setparam_region = {
>     .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
>     .param = region_param,
> };
> 
> struct drm_i915_gem_create_ext create_ext = {
>         .size = 16 * PAGE_SIZE,
>         .extensions = (uintptr_t)&setparam_region,
> };
> int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
> if (err) ...

Looking at the existing gem_create, there is no detection of an
unsupported extension. That is there is no rejection of new userspace
asking for placement on an old kernel. (As erroneous as that would be
for many other reasons.)

Unless I've missed something, we need a new ioctl number for CREATEv2.
-Chris
Matthew Auld Dec. 1, 2020, 1:43 p.m. UTC | #4
On 01/12/2020 12:55, Chris Wilson wrote:
> Quoting Matthew Auld (2020-11-27 12:06:08)
>> Same old gem_create, but now with extensions support. This is needed
>> to support various upcoming use cases. For now we use the extensions
>> mechanism to support setting an immutable priority list of potential
>> placements at creation time.
>>
>> If we wish to set the placements/regions we can simply do:
>>
>> struct drm_i915_gem_object_param region_param = { … }; /* Unchanged */
>> struct drm_i915_gem_create_ext_setparam setparam_region = {
>>      .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
>>      .param = region_param,
>> };
>>
>> struct drm_i915_gem_create_ext create_ext = {
>>          .size = 16 * PAGE_SIZE,
>>          .extensions = (uintptr_t)&setparam_region,
>> };
>> int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
>> if (err) ...
> 
> Looking at the existing gem_create, there is no detection of an
> unsupported extension. That is there is no rejection of new userspace
> asking for placement on an old kernel. (As erroneous as that would be
> for many other reasons.)
> 
> Unless I've missed something, we need a new ioctl number for CREATEv2.

+Joonas

Right, and I guess it's not a good idea for userspace to implement 
something like has_gem_create_ext()?

> -Chris
>
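
As an aside on the has_gem_create_ext() question above: with the ioctl
number reused as in this patch, one way userspace could probe for
extension support is sketched below. This is purely illustrative and not
part of the series; it assumes the usual libdrm headers (drm.h plus the
i915_drm.h additions from this series, <sys/ioctl.h>, <errno.h>) and
relies on the behaviour that an old kernel only copies in the legacy
drm_i915_gem_create payload, silently ignoring the extensions pointer,
while a create_ext-aware kernel rejects an unknown extension.

static bool has_gem_create_ext(int fd)
{
        struct i915_user_extension bogus = {
                .name = ~0U, /* deliberately invalid extension name */
        };
        struct drm_i915_gem_create_ext create = {
                .size = 4096,
                .extensions = (uintptr_t)&bogus,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0) {
                /* Old kernel: extensions ignored, object created anyway. */
                struct drm_gem_close close_args = { .handle = create.handle };

                ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
                return false;
        }

        return errno == EINVAL;
}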
Thomas Hellström (Intel) Dec. 1, 2020, 3:06 p.m. UTC | #5
On 11/27/20 2:25 PM, Chris Wilson wrote:
> Quoting Matthew Auld (2020-11-27 12:06:08)
>> Same old gem_create, but now with extensions support. This is needed
>> to support various upcoming use cases. For now we use the extensions
>> mechanism to support setting an immutable priority list of potential
>> placements at creation time.
>>
>> If we wish to set the placements/regions we can simply do:
>>
>> struct drm_i915_gem_object_param region_param = { … }; /* Unchanged */
>> struct drm_i915_gem_create_ext_setparam setparam_region = {
>>      .base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
>>      .param = region_param,
>> };
>>
>> struct drm_i915_gem_create_ext create_ext = {
>>          .size = 16 * PAGE_SIZE,
>>          .extensions = (uintptr_t)&setparam_region,
>> };
>> int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
>> if (err) ...
>>
>> If we use the normal gem_create or gem_create_ext without the
>> extensions/placements then we still get the old behaviour with only
>> placing the object in system memory.
>>
>> One important change here is that the returned size will now be rounded
>> up to the correct size, depending on the list of placements, since some
>> platforms have minimum page-size restrictions when dealing with device
>> local-memory.
>>
>> Also, we still keep the i915_gem_object_setparam ioctl around, although
>> it is now restricted by the placement list (i.e. we are not allowed to
>> add new placements), and longer term it will go away with respect to
>> setting placements, since it was deemed that the kernel doesn't need to
>> support a dynamic list of placements, which is now solidified by this
>> uapi change.
>>
>> Testcase: igt/gem_create/create-ext-placement-sanity-check
>> Testcase: igt/gem_create/create-ext-placement-each
>> Testcase: igt/gem_create/create-ext-placement-all
>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>> Signed-off-by: CQ Tang <cq.tang@intel.com>
>> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
>> ---
>>   drivers/gpu/drm/i915/Makefile                 |   1 +
>>   drivers/gpu/drm/i915/gem/i915_gem_create.c    | 398 ++++++++++++++++++
>>   drivers/gpu/drm/i915/gem/i915_gem_object.c    |   2 +
>>   .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +
>>   drivers/gpu/drm/i915/gem/i915_gem_region.c    |   4 +
>>   drivers/gpu/drm/i915/i915_drv.c               |   2 +-
>>   drivers/gpu/drm/i915/i915_gem.c               | 103 +----
>>   drivers/gpu/drm/i915/intel_memory_region.c    |  20 +
>>   drivers/gpu/drm/i915/intel_memory_region.h    |   4 +
>>   include/uapi/drm/i915_drm.h                   |  60 +++
>>   10 files changed, 500 insertions(+), 103 deletions(-)
>>   create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_create.c
>>
>> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
>> index ec361d61230b..3955134feca7 100644
>> --- a/drivers/gpu/drm/i915/Makefile
>> +++ b/drivers/gpu/drm/i915/Makefile
>> @@ -134,6 +134,7 @@ gem-y += \
>>          gem/i915_gem_clflush.o \
>>          gem/i915_gem_client_blt.o \
>>          gem/i915_gem_context.o \
>> +       gem/i915_gem_create.o \
>>          gem/i915_gem_dmabuf.o \
>>          gem/i915_gem_domain.o \
>>          gem/i915_gem_execbuffer.o \
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
>> new file mode 100644
>> index 000000000000..6f6dd4f1ce7e
>> --- /dev/null
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
>> @@ -0,0 +1,398 @@
>> +// SPDX-License-Identifier: MIT
>> +/*
>> + * Copyright © 2020 Intel Corporation
>> + */
>> +
>> +#include "gem/i915_gem_ioctls.h"
>> +#include "gem/i915_gem_lmem.h"
>> +#include "gem/i915_gem_object_blt.h"
>> +#include "gem/i915_gem_region.h"
>> +
>> +#include "i915_drv.h"
>> +#include "i915_user_extensions.h"
>> +
>> +static u32 max_page_size(struct intel_memory_region **placements,
>> +                        int n_placements)
>> +{
>> +       u32 max_page_size = 0;
>> +       int i;
>> +
>> +       for (i = 0; i < n_placements; ++i) {
>> +               max_page_size = max_t(u32, max_page_size,
>> +                                     placements[i]->min_page_size);
>> +       }
>> +
>> +       GEM_BUG_ON(!max_page_size);
>> +       return max_page_size;
>> +}
>> +
>> +static int
>> +i915_gem_create(struct drm_file *file,
>> +               struct intel_memory_region **placements,
>> +               int n_placements,
>> +               u64 *size_p,
>> +               u32 *handle_p)
>> +{
>> +       struct drm_i915_gem_object *obj;
>> +       u32 handle;
>> +       u64 size;
>> +       int ret;
>> +
>> +       size = round_up(*size_p, max_page_size(placements, n_placements));
>> +       if (size == 0)
>> +               return -EINVAL;
>> +
>> +       /* For most of the ABI (e.g. mmap) we think in system pages */
>> +       GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
>> +
>> +       /* Allocate the new object */
>> +       obj = i915_gem_object_create_region(placements[0], size, 0);
>> +       if (IS_ERR(obj))
>> +               return PTR_ERR(obj);
>> +
>> +       if (i915_gem_object_is_lmem(obj)) {
>> +               struct intel_gt *gt = obj->mm.region->gt;
>> +               struct intel_context *ce = gt->engine[BCS0]->blitter_context;
>> +
>> +               /*
>> +                * XXX: We really want to move this to get_pages(), but we
>> +                * require grabbing the BKL for the blitting operation which is
>> +                * annoying. In the pipeline is support for async get_pages()
>> +                * which should fit nicely for this. Also note that the actual
>> +                * clear should be done async(we currently do an object_wait
>> +                * which is pure garbage), we just need to take care if
>> +                * userspace opts out of implicit sync for the execbuf, to avoid any
>> +                * potential info leak.
>> +                */
> Not just XXX, but the design should be completed first.

Matthew, I have a patch series in the making that moves this blit to
get_pages().

/Thomas

Patch

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ec361d61230b..3955134feca7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -134,6 +134,7 @@  gem-y += \
 	gem/i915_gem_clflush.o \
 	gem/i915_gem_client_blt.o \
 	gem/i915_gem_context.o \
+	gem/i915_gem_create.o \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
 	gem/i915_gem_execbuffer.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
new file mode 100644
index 000000000000..6f6dd4f1ce7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -0,0 +1,398 @@ 
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+#include "i915_user_extensions.h"
+
+static u32 max_page_size(struct intel_memory_region **placements,
+			 int n_placements)
+{
+	u32 max_page_size = 0;
+	int i;
+
+	for (i = 0; i < n_placements; ++i) {
+		max_page_size = max_t(u32, max_page_size,
+				      placements[i]->min_page_size);
+	}
+
+	GEM_BUG_ON(!max_page_size);
+	return max_page_size;
+}
+
+static int
+i915_gem_create(struct drm_file *file,
+		struct intel_memory_region **placements,
+		int n_placements,
+		u64 *size_p,
+		u32 *handle_p)
+{
+	struct drm_i915_gem_object *obj;
+	u32 handle;
+	u64 size;
+	int ret;
+
+	size = round_up(*size_p, max_page_size(placements, n_placements));
+	if (size == 0)
+		return -EINVAL;
+
+	/* For most of the ABI (e.g. mmap) we think in system pages */
+	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+	/* Allocate the new object */
+	obj = i915_gem_object_create_region(placements[0], size, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (i915_gem_object_is_lmem(obj)) {
+		struct intel_gt *gt = obj->mm.region->gt;
+		struct intel_context *ce = gt->engine[BCS0]->blitter_context;
+
+		/*
+		 * XXX: We really want to move this to get_pages(), but we
+		 * require grabbing the BKL for the blitting operation which is
+		 * annoying. In the pipeline is support for async get_pages()
+		 * which should fit nicely for this. Also note that the actual
+		 * clear should be done async(we currently do an object_wait
+		 * which is pure garbage), we just need to take care if
+		 * userspace opts out of implicit sync for the execbuf, to avoid any
+		 * potential info leak.
+		 */
+
+retry:
+		ret = i915_gem_object_fill_blt(obj, ce, 0);
+		if (ret == -EINTR)
+			goto retry;
+		if (ret) {
+			/*
+			 * XXX: Post the error to where we would normally gather
+			 * and clear the pages. This better reflects the final
+			 * uapi behaviour, once we are at the point where we can
+			 * move the clear worker to get_pages().
+			 */
+			i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+			i915_gem_object_lock(obj, NULL);
+			__i915_gem_object_put_pages(obj);
+			i915_gem_object_unlock(obj);
+			obj->mm.gem_create_posted_err = ret;
+			goto handle_create;
+		}
+
+		/*
+		 * XXX: Occasionally i915_gem_object_wait() called inside
+		 * i915_gem_object_set_to_cpu_domain() get interrupted
+		 * and return -ERESTARTSYS, this will cause go clearing
+		 * code below and also set the gem_create_posted_err.
+		 * moreover, the clearing sometimes fails because the
+		 * object is still pinned by the blitter clearing code.
+		 * this makes us to have an object with or without lmem
+		 * pages, and with gem_create_posted_err = -ERESTARTSYS.
+		 * Under lmem pressure, if the object has pages, we might
+		 * swap out this object to smem. Next when user space
+		 * code use this object in gem_execbuf() call, get_pages()
+		 * operation will return -ERESTARTSYS error code, which
+		 * causes user space code to fail.
+		 *
+		 * To avoid this problem, we add a non-interruptible
+		 * wait before setting object to cpu domain.
+		 */
+		i915_gem_object_lock(obj, NULL);
+		ret = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+		if (!ret)
+			ret = i915_gem_object_set_to_cpu_domain(obj, false);
+		if (ret) {
+			i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+			__i915_gem_object_put_pages(obj);
+			obj->mm.gem_create_posted_err = ret;
+			i915_gem_object_unlock(obj);
+			goto handle_create;
+		}
+		i915_gem_object_unlock(obj);
+	}
+
+handle_create:
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
+	/* drop reference from allocate - handle holds it now */
+	i915_gem_object_put(obj);
+	if (ret)
+		return ret;
+
+	obj->mm.placements = placements;
+	obj->mm.n_placements = n_placements;
+
+	*handle_p = handle;
+	*size_p = size;
+	return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+		     struct drm_device *dev,
+		     struct drm_mode_create_dumb *args)
+{
+	struct intel_memory_region **placements;
+	enum intel_memory_type mem_type;
+	int cpp = DIV_ROUND_UP(args->bpp, 8);
+	u32 format;
+	int ret;
+
+	switch (cpp) {
+	case 1:
+		format = DRM_FORMAT_C8;
+		break;
+	case 2:
+		format = DRM_FORMAT_RGB565;
+		break;
+	case 4:
+		format = DRM_FORMAT_XRGB8888;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* have to work out size/pitch and return them */
+	args->pitch = ALIGN(args->width * cpp, 64);
+
+	/* align stride to page size so that we can remap */
+	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+						    DRM_FORMAT_MOD_LINEAR))
+		args->pitch = ALIGN(args->pitch, 4096);
+
+	if (args->pitch < args->width)
+		return -EINVAL;
+
+	args->size = mul_u32_u32(args->pitch, args->height);
+
+	mem_type = INTEL_MEMORY_SYSTEM;
+	if (HAS_LMEM(to_i915(dev)))
+		mem_type = INTEL_MEMORY_LOCAL;
+
+	placements = kmalloc(sizeof(struct intel_memory_region *), GFP_KERNEL);
+	if (!placements)
+		return -ENOMEM;
+
+	placements[0] = intel_memory_region_by_type(to_i915(dev), mem_type);
+
+	ret = i915_gem_create(file,
+			      placements, 1,
+			      &args->size, &args->handle);
+	if (ret)
+		kfree(placements);
+
+	return ret;
+}
+
+struct create_ext {
+	struct drm_i915_private *i915;
+	struct intel_memory_region **placements;
+	int n_placements;
+};
+
+static void repr_placements(char *buf, size_t size,
+			    struct intel_memory_region **placements,
+			    int n_placements)
+{
+	int i;
+
+	buf[0] = '\0';
+
+	for (i = 0; i < n_placements; i++) {
+		struct intel_memory_region *mr = placements[i];
+		int r;
+
+		r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
+			     mr->name, mr->type, mr->instance);
+		if (r >= size)
+			return;
+
+		buf += r;
+		size -= r;
+	}
+}
+
+static int set_placements(struct drm_i915_gem_object_param *args,
+			  struct create_ext *ext_data)
+{
+	struct drm_i915_private *i915 = ext_data->i915;
+	struct drm_i915_gem_memory_class_instance __user *uregions =
+		u64_to_user_ptr(args->data);
+	struct intel_memory_region **placements;
+	u32 mask;
+	int i, ret = 0;
+
+	if (args->handle) {
+		DRM_DEBUG("Handle should be zero\n");
+		ret = -EINVAL;
+	}
+
+	if (!args->size) {
+		DRM_DEBUG("Size is zero\n");
+		ret = -EINVAL;
+	}
+
+	if (args->size > ARRAY_SIZE(i915->mm.regions)) {
+		DRM_DEBUG("Too many placements\n");
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	placements = kmalloc_array(args->size,
+				   sizeof(struct intel_memory_region *),
+				   GFP_KERNEL);
+	if (!placements)
+		return -ENOMEM;
+
+	mask = 0;
+	for (i = 0; i < args->size; i++) {
+		struct drm_i915_gem_memory_class_instance region;
+		struct intel_memory_region *mr;
+
+		if (copy_from_user(&region, uregions, sizeof(region))) {
+			ret = -EFAULT;
+			goto out_free;
+		}
+
+		mr = intel_memory_region_lookup(i915,
+						region.memory_class,
+						region.memory_instance);
+		if (!mr) {
+			DRM_DEBUG("Device is missing region { class: %d, inst: %d } at index = %d\n",
+				  region.memory_class, region.memory_instance, i);
+			ret = -EINVAL;
+			goto out_dump;
+		}
+
+		if (mask & BIT(mr->id)) {
+			DRM_DEBUG("Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
+				  mr->name, region.memory_class,
+				  region.memory_instance, i);
+			ret = -EINVAL;
+			goto out_dump;
+		}
+
+		placements[i] = mr;
+		mask |= BIT(mr->id);
+
+		++uregions;
+	}
+
+	if (ext_data->placements) {
+		ret = -EINVAL;
+		goto out_dump;
+	}
+
+	ext_data->placements = placements;
+	ext_data->n_placements = args->size;
+
+	return 0;
+
+out_dump:
+	if (1) {
+		char buf[256];
+
+		if (ext_data->placements) {
+			repr_placements(buf,
+					sizeof(buf),
+					ext_data->placements,
+					ext_data->n_placements);
+			DRM_DEBUG("Placements were already set in previous SETPARAM. Existing placements: %s\n",
+				  buf);
+		}
+
+		repr_placements(buf, sizeof(buf), placements, i);
+		DRM_DEBUG("New placements(so far validated): %s\n", buf);
+	}
+
+out_free:
+	kfree(placements);
+	return ret;
+}
+
+static int __create_setparam(struct drm_i915_gem_object_param *args,
+			     struct create_ext *ext_data)
+{
+	if (!(args->param & I915_OBJECT_PARAM)) {
+		DRM_DEBUG("Missing I915_OBJECT_PARAM namespace\n");
+		return -EINVAL;
+	}
+
+	switch (lower_32_bits(args->param)) {
+	case I915_PARAM_MEMORY_REGIONS:
+		return set_placements(args, ext_data);
+	}
+
+	return -EINVAL;
+}
+
+static int create_setparam(struct i915_user_extension __user *base, void *data)
+{
+	struct drm_i915_gem_create_ext_setparam ext;
+
+	if (copy_from_user(&ext, base, sizeof(ext)))
+		return -EFAULT;
+
+	return __create_setparam(&ext.param, data);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+	[I915_GEM_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct create_ext ext_data = { .i915 = i915 };
+	struct drm_i915_gem_create_ext *args = data;
+	int ret;
+
+	i915_gem_flush_free_objects(i915);
+
+	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+				   create_extensions,
+				   ARRAY_SIZE(create_extensions),
+				   &ext_data);
+	if (ret)
+		goto err_free;
+
+	if (!ext_data.placements) {
+		struct intel_memory_region **placements;
+		enum intel_memory_type mem_type = INTEL_MEMORY_SYSTEM;
+
+		placements = kmalloc(sizeof(struct intel_memory_region *),
+				     GFP_KERNEL);
+		if (!placements)
+			return -ENOMEM;
+
+		placements[0] = intel_memory_region_by_type(i915, mem_type);
+
+		ext_data.placements = placements;
+		ext_data.n_placements = 1;
+	}
+
+	ret = i915_gem_create(file,
+			      ext_data.placements,
+			      ext_data.n_placements,
+			      &args->size, &args->handle);
+	if (!ret)
+		return 0;
+
+err_free:
+	kfree(ext_data.placements);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 49935245a4a8..89b530841126 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -254,6 +254,8 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (obj->ops->release)
 			obj->ops->release(obj);
 
+		kfree(obj->mm.placements);
+
 		/* But keep the pointer alive for RCU-protected lookups */
 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 		cond_resched();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 6d101275bc9d..115ad32c303f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -212,6 +212,15 @@  struct drm_i915_gem_object {
 		atomic_t pages_pin_count;
 		atomic_t shrink_pin;
 
+		/**
+		 * Priority list of potential placements for this object.
+		 */
+		struct intel_memory_region **placements;
+		int n_placements;
+
+		/* XXX: Nasty hack, see gem_create */
+		int gem_create_posted_err;
+
 		/**
 		 * Memory region for this object.
 		 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 58bf5f9e3199..8f352ba6202d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -33,6 +33,10 @@  i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	unsigned int sg_page_sizes;
 	int ret;
 
+	/* XXX: Check if we have any post. This is nasty hack, see gem_create */
+	if (obj->mm.gem_create_posted_err)
+		return obj->mm.gem_create_posted_err;
+
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 07b3a89ec09e..f4540c048cd9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1729,7 +1729,7 @@  static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef2124c17a7f..bf67f323a1ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,6 +43,7 @@ 
 #include "gem/i915_gem_clflush.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_mman.h"
 #include "gem/i915_gem_region.h"
 #include "gt/intel_engine_user.h"
@@ -179,108 +180,6 @@  int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
-static int
-i915_gem_create(struct drm_file *file,
-		struct intel_memory_region *mr,
-		u64 *size_p,
-		u32 *handle_p)
-{
-	struct drm_i915_gem_object *obj;
-	u32 handle;
-	u64 size;
-	int ret;
-
-	GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
-	size = round_up(*size_p, mr->min_page_size);
-	if (size == 0)
-		return -EINVAL;
-
-	/* For most of the ABI (e.g. mmap) we think in system pages */
-	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
-	/* Allocate the new object */
-	obj = i915_gem_object_create_region(mr, size, 0);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	ret = drm_gem_handle_create(file, &obj->base, &handle);
-	/* drop reference from allocate - handle holds it now */
-	i915_gem_object_put(obj);
-	if (ret)
-		return ret;
-
-	*handle_p = handle;
-	*size_p = size;
-	return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
-		     struct drm_device *dev,
-		     struct drm_mode_create_dumb *args)
-{
-	enum intel_memory_type mem_type;
-	int cpp = DIV_ROUND_UP(args->bpp, 8);
-	u32 format;
-
-	switch (cpp) {
-	case 1:
-		format = DRM_FORMAT_C8;
-		break;
-	case 2:
-		format = DRM_FORMAT_RGB565;
-		break;
-	case 4:
-		format = DRM_FORMAT_XRGB8888;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * cpp, 64);
-
-	/* align stride to page size so that we can remap */
-	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
-						    DRM_FORMAT_MOD_LINEAR))
-		args->pitch = ALIGN(args->pitch, 4096);
-
-	if (args->pitch < args->width)
-		return -EINVAL;
-
-	args->size = mul_u32_u32(args->pitch, args->height);
-
-	mem_type = INTEL_MEMORY_SYSTEM;
-	if (HAS_LMEM(to_i915(dev)))
-		mem_type = INTEL_MEMORY_LOCAL;
-
-	return i915_gem_create(file,
-			       intel_memory_region_by_type(to_i915(dev),
-							   mem_type),
-			       &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-	struct drm_i915_gem_create *args = data;
-
-	i915_gem_flush_free_objects(i915);
-
-	return i915_gem_create(file,
-			       intel_memory_region_by_type(i915,
-							   INTEL_MEMORY_SYSTEM),
-			       &args->size, &args->handle);
-}
-
 static int
 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
 	    bool needs_clflush)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 6f40748901da..67240bddf2ca 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -21,6 +21,26 @@  const struct intel_memory_region_info intel_region_map[] = {
        },
 };
 
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+			   u16 class, u16 instance)
+{
+	int i;
+
+	/* XXX: consider maybe converting to an rb tree at some point */
+	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); ++i) {
+		struct intel_memory_region *region = i915->mm.regions[i];
+
+		if (!region)
+			continue;
+
+		if (region->type == class && region->instance == instance)
+			return region;
+	}
+
+	return NULL;
+}
+
 struct intel_memory_region *
 intel_memory_region_by_type(struct drm_i915_private *i915,
 			    enum intel_memory_type mem_type)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 15dcb57b4b5a..20431d3ce490 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -102,6 +102,10 @@  struct intel_memory_region {
 	} objects;
 };
 
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+			   u16 class, u16 instance);
+
 int intel_memory_region_init_buddy(struct intel_memory_region *mem);
 void intel_memory_region_release_buddy(struct intel_memory_region *mem);
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 41845203250d..f6e3a0462414 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -391,6 +391,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
 #define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create_ext)
 #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -728,6 +729,27 @@  struct drm_i915_gem_create {
 	__u32 pad;
 };
 
+struct drm_i915_gem_create_ext {
+
+	/**
+	 * Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 */
+	__u64 size;
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+	__u32 pad;
+#define I915_GEM_CREATE_EXT_SETPARAM (1u << 0)
+#define I915_GEM_CREATE_EXT_FLAGS_UNKNOWN \
+	(-(I915_GEM_CREATE_EXT_SETPARAM << 1))
+	__u64 extensions;
+};
+
 struct drm_i915_gem_pread {
 	/** Handle for the object being read. */
 	__u32 handle;
@@ -1698,6 +1720,44 @@  struct drm_i915_gem_context_param {
 	__u64 value;
 };
 
+struct drm_i915_gem_object_param {
+	/* Object handle (0 for I915_GEM_CREATE_EXT_SETPARAM) */
+	__u32 handle;
+
+	/* Data pointer size */
+	__u32 size;
+
+/*
+ * I915_OBJECT_PARAM:
+ *
+ * Select object namespace for the param.
+ */
+#define I915_OBJECT_PARAM  (1ull<<32)
+
+/*
+ * I915_PARAM_MEMORY_REGIONS:
+ *
+ * Set the data pointer with the desired set of placements in priority
+ * order(each entry must be unique and supported by the device), as an array of
+ * drm_i915_gem_memory_class_instance, or an equivalent layout of class:instance
+ * pair encodings. See DRM_I915_QUERY_MEMORY_REGIONS for how to query the
+ * supported regions.
+ *
+ * Note that this requires the I915_OBJECT_PARAM namespace:
+ *	.param = I915_OBJECT_PARAM | I915_PARAM_MEMORY_REGIONS
+ */
+#define I915_PARAM_MEMORY_REGIONS 0x1
+	__u64 param;
+
+	/* Data value or pointer */
+	__u64 data;
+};
+
+struct drm_i915_gem_create_ext_setparam {
+	struct i915_user_extension base;
+	struct drm_i915_gem_object_param param;
+};
+
 /**
  * Context SSEU programming
  *