@@ -134,6 +134,7 @@ gem-y += \
gem/i915_gem_clflush.o \
gem/i915_gem_client_blt.o \
gem/i915_gem_context.o \
+ gem/i915_gem_create.o \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
new file mode 100644
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/i915_gem_region.h"
+
+#include "i915_drv.h"
+#include "i915_user_extensions.h"
+
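+/*
+ * The object can end up backed by any of its potential placements, so it
+ * must be padded out to the largest min_page_size across all of them.
+ */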
+static u32 max_page_size(struct intel_memory_region **placements,
+ int n_placements)
+{
+ u32 max_page_size = 0;
+ int i;
+
+ for (i = 0; i < n_placements; ++i) {
+ max_page_size = max_t(u32, max_page_size,
+ placements[i]->min_page_size);
+ }
+
+ GEM_BUG_ON(!max_page_size);
+ return max_page_size;
+}
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct intel_memory_region **placements,
+ int n_placements,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ u32 handle;
+ u64 size;
+ int ret;
+
+ size = round_up(*size_p, max_page_size(placements, n_placements));
+ if (size == 0)
+ return -EINVAL;
+
+ /* For most of the ABI (e.g. mmap) we think in system pages */
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ /* Allocate the new object */
+ obj = i915_gem_object_create_region(placements[0], size, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_gt *gt = obj->mm.region->gt;
+ struct intel_context *ce = gt->engine[BCS0]->blitter_context;
+
+ /*
+ * XXX: We really want to move this to get_pages(), but we
+ * require grabbing the BKL for the blitting operation which is
+ * annoying. In the pipeline is support for async get_pages()
+ * which should fit nicely here. Also note that the actual
+ * clear should be done asynchronously (we currently do an
+ * object_wait, which is pure garbage); we just need to take
+ * care if userspace opts out of implicit sync for the execbuf,
+ * to avoid any potential info leak.
+ */
+
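+ /*
+ * The blitter clear can be interrupted by a signal; in that case
+ * simply retry until it completes (or fails for a different reason).
+ */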
+retry:
+ ret = i915_gem_object_fill_blt(obj, ce, 0);
+ if (ret == -EINTR)
+ goto retry;
+ if (ret) {
+ /*
+ * XXX: Post the error to where we would normally gather
+ * and clear the pages. This better reflects the final
+ * uapi behaviour, once we are at the point where we can
+ * move the clear worker to get_pages().
+ */
+ i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ i915_gem_object_lock(obj, NULL);
+ __i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
+ obj->mm.gem_create_posted_err = ret;
+ goto handle_create;
+ }
+
+ /*
+ * XXX: Occasionally i915_gem_object_wait() called inside
+ * i915_gem_object_set_to_cpu_domain() gets interrupted
+ * and returns -ERESTARTSYS, which sends us down the error
+ * path below and also sets gem_create_posted_err.
+ * Moreover, the clearing sometimes fails because the
+ * object is still pinned by the blitter clearing code.
+ * This leaves us with an object with or without lmem
+ * pages, and with gem_create_posted_err = -ERESTARTSYS.
+ * Under lmem pressure, if the object has pages, we might
+ * swap out this object to smem. When userspace later uses
+ * this object in a gem_execbuf() call, get_pages() will
+ * return -ERESTARTSYS, causing the userspace call to fail.
+ *
+ * To avoid this problem, we add a non-interruptible
+ * wait before setting the object to the cpu domain.
+ */
+ i915_gem_object_lock(obj, NULL);
+ ret = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
+ if (!ret)
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret) {
+ i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ __i915_gem_object_put_pages(obj);
+ obj->mm.gem_create_posted_err = ret;
+ i915_gem_object_unlock(obj);
+ goto handle_create;
+ }
+ i915_gem_object_unlock(obj);
+ }
+
+handle_create:
+ /*
+ * Publish the placement list before the handle makes the object
+ * visible to userspace; once we drop our reference below, the handle
+ * may hold the only remaining one.
+ */
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ if (ret) {
+ /* On failure the caller still owns the placement list */
+ obj->mm.placements = NULL;
+ obj->mm.n_placements = 0;
+ }
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *handle_p = handle;
+ *size_p = size;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct intel_memory_region **placements;
+ enum intel_memory_type mem_type;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+ int ret;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
+
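+ /*
+ * On discrete, dumb buffers are expected to be used for scanout, so
+ * they need to be placed in device local memory when it exists.
+ */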
+ mem_type = INTEL_MEMORY_SYSTEM;
+ if (HAS_LMEM(to_i915(dev)))
+ mem_type = INTEL_MEMORY_LOCAL;
+
+ placements = kmalloc(sizeof(struct intel_memory_region *), GFP_KERNEL);
+ if (!placements)
+ return -ENOMEM;
+
+ placements[0] = intel_memory_region_by_type(to_i915(dev), mem_type);
+
+ ret = i915_gem_create(file,
+ placements, 1,
+ &args->size, &args->handle);
+ if (ret)
+ kfree(placements);
+
+ return ret;
+}
+
+struct create_ext {
+ struct drm_i915_private *i915;
+ struct intel_memory_region **placements;
+ int n_placements;
+};
+
+static void repr_placements(char *buf, size_t size,
+ struct intel_memory_region **placements,
+ int n_placements)
+{
+ int i;
+
+ buf[0] = '\0';
+
+ for (i = 0; i < n_placements; i++) {
+ struct intel_memory_region *mr = placements[i];
+ int r;
+
+ r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
+ mr->name, mr->type, mr->instance);
+ if (r >= size)
+ return;
+
+ buf += r;
+ size -= r;
+ }
+}
+
+static int set_placements(struct drm_i915_gem_object_param *args,
+ struct create_ext *ext_data)
+{
+ struct drm_i915_private *i915 = ext_data->i915;
+ struct drm_i915_gem_memory_class_instance __user *uregions =
+ u64_to_user_ptr(args->data);
+ struct intel_memory_region **placements;
+ u32 mask;
+ int i, ret = 0;
+
+ if (args->handle) {
+ DRM_DEBUG("Handle should be zero\n");
+ ret = -EINVAL;
+ }
+
+ if (!args->size) {
+ DRM_DEBUG("Size is zero\n");
+ ret = -EINVAL;
+ }
+
+ if (args->size > ARRAY_SIZE(i915->mm.regions)) {
+ DRM_DEBUG("Too many placements\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ placements = kmalloc_array(args->size,
+ sizeof(struct intel_memory_region *),
+ GFP_KERNEL);
+ if (!placements)
+ return -ENOMEM;
+
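+ /*
+ * Every region has a unique small id, so a bitmask of the ids seen so
+ * far is enough to reject duplicate entries in the placement list.
+ */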
+ mask = 0;
+ for (i = 0; i < args->size; i++) {
+ struct drm_i915_gem_memory_class_instance region;
+ struct intel_memory_region *mr;
+
+ if (copy_from_user(&region, uregions, sizeof(region))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ mr = intel_memory_region_lookup(i915,
+ region.memory_class,
+ region.memory_instance);
+ if (!mr) {
+ DRM_DEBUG("Device is missing region { class: %d, inst: %d } at index = %d\n",
+ region.memory_class, region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ if (mask & BIT(mr->id)) {
+ DRM_DEBUG("Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
+ mr->name, region.memory_class,
+ region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ placements[i] = mr;
+ mask |= BIT(mr->id);
+
+ ++uregions;
+ }
+
+ if (ext_data->placements) {
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ ext_data->placements = placements;
+ ext_data->n_placements = args->size;
+
+ return 0;
+
+out_dump:
+ if (1) {
+ char buf[256];
+
+ if (ext_data->placements) {
+ repr_placements(buf,
+ sizeof(buf),
+ ext_data->placements,
+ ext_data->n_placements);
+ DRM_DEBUG("Placements were already set in previous SETPARAM. Existing placements: %s\n",
+ buf);
+ }
+
+ repr_placements(buf, sizeof(buf), placements, i);
+ DRM_DEBUG("New placements(so far validated): %s\n", buf);
+ }
+
+out_free:
+ kfree(placements);
+ return ret;
+}
+
+static int __create_setparam(struct drm_i915_gem_object_param *args,
+ struct create_ext *ext_data)
+{
+ if (!(args->param & I915_OBJECT_PARAM)) {
+ DRM_DEBUG("Missing I915_OBJECT_PARAM namespace\n");
+ return -EINVAL;
+ }
+
+ switch (lower_32_bits(args->param)) {
+ case I915_PARAM_MEMORY_REGIONS:
+ return set_placements(args, ext_data);
+ }
+
+ return -EINVAL;
+}
+
+static int create_setparam(struct i915_user_extension __user *base, void *data)
+{
+ struct drm_i915_gem_create_ext_setparam ext;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ return __create_setparam(&ext.param, data);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_GEM_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+/**
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct create_ext ext_data = { .i915 = i915 };
+ struct drm_i915_gem_create_ext *args = data;
+ int ret;
+
+ i915_gem_flush_free_objects(i915);
+
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ if (ret)
+ goto err_free;
+
+ if (!ext_data.placements) {
+ struct intel_memory_region **placements;
+ enum intel_memory_type mem_type = INTEL_MEMORY_SYSTEM;
+
+ placements = kmalloc(sizeof(struct intel_memory_region *),
+ GFP_KERNEL);
+ if (!placements)
+ return -ENOMEM;
+
+ placements[0] = intel_memory_region_by_type(i915, mem_type);
+
+ ext_data.placements = placements;
+ ext_data.n_placements = 1;
+ }
+
+ ret = i915_gem_create(file,
+ ext_data.placements,
+ ext_data.n_placements,
+ &args->size, &args->handle);
+ if (!ret)
+ return 0;
+
+err_free:
+ kfree(ext_data.placements);
+ return ret;
+}
@@ -254,6 +254,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->ops->release)
obj->ops->release(obj);
+ kfree(obj->mm.placements);
+
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
@@ -212,6 +212,15 @@ struct drm_i915_gem_object {
atomic_t pages_pin_count;
atomic_t shrink_pin;
+ /**
+ * Priority list of potential placements for this object.
+ */
+ struct intel_memory_region **placements;
+ int n_placements;
+
+ /* XXX: Nasty hack, see gem_create */
+ int gem_create_posted_err;
+
/**
* Memory region for this object.
*/
@@ -33,6 +33,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
unsigned int sg_page_sizes;
int ret;
+ /* XXX: Check if we have a posted error. This is a nasty hack, see gem_create */
+ if (obj->mm.gem_create_posted_err)
+ return obj->mm.gem_create_posted_err;
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
@@ -1729,7 +1729,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
@@ -43,6 +43,7 @@
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
@@ -179,108 +180,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
return ret;
}
-static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
-{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
- int ret;
-
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
- if (size == 0)
- return -EINVAL;
-
- /* For most of the ABI (e.g. mmap) we think in system pages */
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- *handle_p = handle;
- *size_p = size;
- return 0;
-}
-
-int
-i915_gem_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- enum intel_memory_type mem_type;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- u32 format;
-
- switch (cpp) {
- case 1:
- format = DRM_FORMAT_C8;
- break;
- case 2:
- format = DRM_FORMAT_RGB565;
- break;
- case 4:
- format = DRM_FORMAT_XRGB8888;
- break;
- default:
- return -EINVAL;
- }
-
- /* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * cpp, 64);
-
- /* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
- DRM_FORMAT_MOD_LINEAR))
- args->pitch = ALIGN(args->pitch, 4096);
-
- if (args->pitch < args->width)
- return -EINVAL;
-
- args->size = mul_u32_u32(args->pitch, args->height);
-
- mem_type = INTEL_MEMORY_SYSTEM;
- if (HAS_LMEM(to_i915(dev)))
- mem_type = INTEL_MEMORY_LOCAL;
-
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_create *args = data;
-
- i915_gem_flush_free_objects(i915);
-
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
-}
-
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
@@ -21,6 +21,26 @@ const struct intel_memory_region_info intel_region_map[] = {
},
};
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance)
+{
+ int i;
+
+ /* XXX: consider maybe converting to an rb tree at some point */
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); ++i) {
+ struct intel_memory_region *region = i915->mm.regions[i];
+
+ if (!region)
+ continue;
+
+ if (region->type == class && region->instance == instance)
+ return region;
+ }
+
+ return NULL;
+}
+
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type)
@@ -102,6 +102,10 @@ struct intel_memory_region {
} objects;
};
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance);
+
int intel_memory_region_init_buddy(struct intel_memory_region *mem);
void intel_memory_region_release_buddy(struct intel_memory_region *mem);
@@ -391,6 +391,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -728,6 +729,27 @@ struct drm_i915_gem_create {
__u32 pad;
};
+struct drm_i915_gem_create_ext {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+#define I915_GEM_CREATE_EXT_SETPARAM (1u << 0)
+#define I915_GEM_CREATE_EXT_FLAGS_UNKNOWN \
+ (-(I915_GEM_CREATE_EXT_SETPARAM << 1))
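+ /**
+ * Pointer to the first i915_user_extension in a chain (linked via
+ * next_extension), or zero if none. I915_GEM_CREATE_EXT_SETPARAM is
+ * currently the only extension defined for this ioctl.
+ */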
+ __u64 extensions;
+};
+
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
@@ -1698,6 +1720,44 @@ struct drm_i915_gem_context_param {
__u64 value;
};
+struct drm_i915_gem_object_param {
+ /* Object handle (0 for I915_GEM_CREATE_EXT_SETPARAM) */
+ __u32 handle;
+
+ /* Element count at the data pointer (e.g. number of regions for I915_PARAM_MEMORY_REGIONS) */
+ __u32 size;
+
+/*
+ * I915_OBJECT_PARAM:
+ *
+ * Select object namespace for the param.
+ */
+#define I915_OBJECT_PARAM (1ull<<32)
+
+/*
+ * I915_PARAM_MEMORY_REGIONS:
+ *
+ * Set the data pointer with the desired set of placements in priority
+ * order (each entry must be unique and supported by the device), as an array of
+ * drm_i915_gem_memory_class_instance, or an equivalent layout of class:instance
+ * pair encodings. See DRM_I915_QUERY_MEMORY_REGIONS for how to query the
+ * supported regions.
+ *
+ * Note that this requires the I915_OBJECT_PARAM namespace:
+ * .param = I915_OBJECT_PARAM | I915_PARAM_MEMORY_REGIONS
+ */
+#define I915_PARAM_MEMORY_REGIONS 0x1
+ __u64 param;
+
+ /* Data value or pointer */
+ __u64 data;
+};
+
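+/*
+ * A minimal usage sketch from userspace (illustrative only: the
+ * I915_MEMORY_CLASS_* names are assumed from the memory region query uapi,
+ * and fd is an open DRM fd). Create an object with lmem as the preferred
+ * placement and smem as the fallback:
+ *
+ *	struct drm_i915_gem_memory_class_instance regions[] = {
+ *		{ .memory_class = I915_MEMORY_CLASS_DEVICE },
+ *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM },
+ *	};
+ *	struct drm_i915_gem_create_ext_setparam setparam = {
+ *		.base = { .name = I915_GEM_CREATE_EXT_SETPARAM },
+ *		.param = {
+ *			.param = I915_OBJECT_PARAM | I915_PARAM_MEMORY_REGIONS,
+ *			.data = (uintptr_t)regions,
+ *			.size = ARRAY_SIZE(regions),
+ *		},
+ *	};
+ *	struct drm_i915_gem_create_ext create = {
+ *		.size = 4096,
+ *		.extensions = (uintptr_t)&setparam,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
+ *
+ * On success, create.handle and create.size (rounded up to the largest
+ * min page size of the requested placements) are filled in.
+ */
+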
+struct drm_i915_gem_create_ext_setparam {
+ struct i915_user_extension base;
+ struct drm_i915_gem_object_param param;
+};
+
/**
* Context SSEU programming
*