@@ -59,6 +59,7 @@ i915-y += i915_drv.o \
i915-y += \
i915_memcpy.o \
i915_mm.o \
+ intel_memory_region.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_user_extensions.o
@@ -64,6 +64,15 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
+	/**
+	 * @memory_region: Memory region backing this object's pages.
+	 */
+	struct intel_memory_region *memory_region;
+	/**
+	 * @blocks: List of memory region blocks allocated for this object.
+	 */
+	struct list_head blocks;
+
struct {
/**
* @vma.lock: protect the list/tree of vmas
@@ -17,6 +17,7 @@
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
@@ -447,6 +448,92 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
return err;
}
+
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+ struct i915_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int bit;
+ int err = 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G,
+ I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ return PTR_ERR(mem);
+ }
+
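+	/*
+	 * For each page size the device supports, create one object of
+	 * exactly that size: it should come back as a single buddy block,
+	 * so its DMA address must be naturally aligned for the GTT to be
+	 * able to use that page size.
+	 */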
+ for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ unsigned int page_size = BIT(bit);
+ resource_size_t phys;
+
+		pr_info("creating object, size=%x\n", page_size);
+		obj = i915_gem_object_create_region(mem, page_size, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto out_destroy_region;
+		}
+
+		pr_info("memory region start(%pa)\n",
+			&obj->memory_region->region.start);
+
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("memory region misaligned(%pa)\n", &phys);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("page_sizes.gtt=%u, expected=%u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+ }
+
+	goto out_destroy_region;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_destroy_region:
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_memory_region_destroy(mem);
+
+ return err;
+}
+
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
@@ -1679,6 +1766,7 @@ int i915_gem_huge_page_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
@@ -77,6 +77,7 @@
#include "intel_device_info.h"
#include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
@@ -1815,4 +1815,5 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
+#include "selftests/intel_memory_region.c"
#endif
new file mode 100644
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+static void
+memory_region_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+	struct i915_buddy_block *block, *on;
+
+ lockdep_assert_held(&obj->memory_region->mm_lock);
+
+ list_for_each_entry_safe(block, on, &obj->blocks, link) {
+ list_del_init(&block->link);
+ i915_buddy_free(&obj->memory_region->mm, block);
+ }
+
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+void
+i915_memory_region_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ mutex_lock(&obj->memory_region->mm_lock);
+ memory_region_free_pages(obj, pages);
+ mutex_unlock(&obj->memory_region->mm_lock);
+
+ obj->mm.dirty = false;
+}
+
+int
+i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->memory_region;
+ resource_size_t size = obj->base.size;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ unsigned long n_pages;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.min_size));
+ GEM_BUG_ON(!list_empty(&obj->blocks));
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ n_pages = size >> ilog2(mem->mm.min_size);
+
+ if (sg_alloc_table(st, n_pages, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+
+ mutex_lock(&mem->mm_lock);
+
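+	/*
+	 * Greedy allocation: try the largest power-of-two order that still
+	 * fits the remaining page count, stepping down one order at a time
+	 * when the buddy allocator can't satisfy it, so a fragmented region
+	 * degrades into more, smaller blocks instead of failing outright.
+	 */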
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+ u64 block_size;
+ u64 offset;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mem->mm.max_order);
+
+ do {
+ block = i915_buddy_alloc(&mem->mm, order);
+ if (!IS_ERR(block))
+ break;
+
+ /* XXX: some kind of eviction pass, local to the device */
+ if (!order--)
+ goto err_free_blocks;
+ } while (1);
+
+ n_pages -= BIT(order);
+
+ INIT_LIST_HEAD(&block->link);
+ list_add(&block->link, &obj->blocks);
+
+ /*
+ * TODO: it might be worth checking consecutive blocks here and
+ * coalesce if we can.
+ */
+ block_size = i915_buddy_block_size(&mem->mm, block);
+ offset = i915_buddy_block_offset(block);
+
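+		/*
+		 * Blocks are tracked by their offset within the region, so
+		 * the DMA address is simply that offset biased by the region
+		 * start. Accumulating each block size into sg_page_sizes
+		 * lets the GTT code pick suitable page sizes later.
+		 */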
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = block_size;
+
+ sg->length = block_size;
+ sg_page_sizes |= block_size;
+ st->nents++;
+
+ if (!n_pages) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while (1);
+
+ mutex_unlock(&mem->mm_lock);
+
+ i915_sg_trim(st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_free_blocks:
+ memory_region_free_pages(obj, st);
+ mutex_unlock(&mem->mm_lock);
+ return -ENXIO;
+}
+
+int i915_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+ return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ mem->min_page_size);
+}
+
+void i915_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+ i915_buddy_fini(&mem->mm);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+
+ if (!mem)
+ return ERR_PTR(-ENODEV);
+
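+	/* Objects are padded out to the region's minimum page size */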
+ size = round_up(size, mem->min_page_size);
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = mem->ops->create_object(mem, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ INIT_LIST_HEAD(&obj->blocks);
+ obj->memory_region = mem;
+
+ return obj;
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops)
+{
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ mem->i915 = i915;
+ mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->io_start = io_start;
+ mem->min_page_size = min_page_size;
+ mem->ops = ops;
+
+ mutex_init(&mem->mm_lock);
+
+ if (ops->init) {
+ err = ops->init(mem);
+ if (err) {
+ kfree(mem);
+ mem = ERR_PTR(err);
+ }
+ }
+
+ return mem;
+}
+
+void
+intel_memory_region_destroy(struct intel_memory_region *mem)
+{
+ if (mem->ops->release)
+ mem->ops->release(mem);
+
+ kfree(mem);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_region.c"
+#endif
new file mode 100644
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * Base memory type
+ */
+enum intel_memory_type {
+ INTEL_SMEM = 0,
+ INTEL_LMEM,
+ INTEL_STOLEN,
+};
+
+enum intel_region_id {
+ INTEL_MEMORY_SMEM = 0,
+ INTEL_MEMORY_LMEM,
+ INTEL_MEMORY_STOLEN,
+	INTEL_MEMORY_UNKNOWN, /* Should be last */
+};
+
+#define REGION_SMEM BIT(INTEL_MEMORY_SMEM)
+#define REGION_LMEM BIT(INTEL_MEMORY_LMEM)
+#define REGION_STOLEN BIT(INTEL_MEMORY_STOLEN)
+
+#define INTEL_MEMORY_TYPE_SHIFT 16
+
+#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
+#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+
+/**
+ * Memory regions encoded as type | instance: one bit above
+ * INTEL_MEMORY_TYPE_SHIFT selects the type and one bit in the low
+ * 16 bits selects the instance, so e.g. SMEM instance 0 is
+ * BIT(INTEL_SMEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0). The
+ * FROM_REGION() macros above recover the indices with ilog2().
+ */
+static const u32 intel_region_map[] = {
+ [INTEL_MEMORY_SMEM] = BIT(INTEL_SMEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+ [INTEL_MEMORY_LMEM] = BIT(INTEL_LMEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+ [INTEL_MEMORY_STOLEN] = BIT(INTEL_STOLEN + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+};
+
+struct intel_memory_region_ops {
+ unsigned int flags;
+
+ int (*init)(struct intel_memory_region *);
+ void (*release)(struct intel_memory_region *);
+
+ struct drm_i915_gem_object *
+ (*create_object)(struct intel_memory_region *,
+ resource_size_t,
+ unsigned int);
+};
+
+struct intel_memory_region {
+ struct drm_i915_private *i915;
+
+ const struct intel_memory_region_ops *ops;
+
+ struct io_mapping iomap;
+ struct resource region;
+
+ struct i915_buddy_mm mm;
+ struct mutex mm_lock;
+
+ resource_size_t io_start;
+ resource_size_t min_page_size;
+
+ unsigned int type;
+ unsigned int instance;
+ unsigned int id;
+};
+
+int i915_memory_region_init_buddy(struct intel_memory_region *mem);
+void i915_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_memory_region_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops);
+void
+intel_memory_region_destroy(struct intel_memory_region *mem);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif
@@ -26,3 +26,4 @@ selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
new file mode 100644
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "gem/selftests/mock_context.h"
+#include "mock_drm.h"
+
+static void close_objects(struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+		/* Avoid polluting the memory region between tests */
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		list_del(&obj->st_link);
+		i915_gem_object_put(obj);
+	}
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = mem->mm.min_size;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
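+	/*
+	 * Allocate objects of prime page counts so the region fills up
+	 * with awkward, non-power-of-two footprints; we only expect
+	 * -ENXIO once the space left really can't fit the next object.
+	 */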
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("igt_mock_fill failed, space still left in region\n");
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(&objects);
+
+ return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_fill),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G,
+ I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, mem);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+ intel_memory_region_destroy(mem);
+
+out_unref:
+ drm_dev_put(&i915->drm);
+
+ return err;
+}
@@ -32,6 +32,7 @@
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "mock_region.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"
new file mode 100644
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .get_pages = i915_memory_region_get_pages_buddy,
+ .put_pages = i915_memory_region_put_pages_buddy,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
+
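+	/* For a power-of-two region the largest block covers the whole region */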
+ if (size > BIT(mem->mm.max_order) * mem->mm.min_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops);
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+
+ return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = i915_memory_region_init_buddy,
+ .release = i915_memory_region_release_buddy,
+ .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start)
+{
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, &mock_region_ops);
+}
new file mode 100644
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */