@@ -140,7 +140,9 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_IS_MAPPABLE,
+
.get_pages = i915_gem_object_get_pages_internal,
.put_pages = i915_gem_object_put_pages_internal,
};
@@ -9,11 +9,47 @@
#include "i915_drv.h"
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .flags = I915_GEM_OBJECT_IS_MAPPABLE,
+
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
};
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *region = obj->mm.region;
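For reference, a minimal usage sketch for the new helpers (not part of the patch; `obj` is assumed to be a pinned lmem object and `n` a valid page index). The plain mapping pairs with io_mapping_unmap(), while the atomic variant must be released with io_mapping_unmap_atomic() without sleeping in between:

	void __iomem *vaddr;

	/* sleeping context: map a single page WC, write one dword, unmap */
	vaddr = i915_gem_object_lmem_io_map_page(obj, n);
	writel(0xdeadbeaf, vaddr);
	io_mapping_unmap(vaddr);

	/* atomic context: no sleeping allowed between map and unmap */
	vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, n);
	writel(0xdeadbeaf, vaddr);
	io_mapping_unmap_atomic(vaddr);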
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
@@ -158,6 +158,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
+static inline bool
+i915_gem_object_is_mappable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_MAPPABLE;
+}
+
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
@@ -31,7 +31,8 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+#define I915_GEM_OBJECT_IS_MAPPABLE BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -171,7 +172,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
void *ptr;
ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap(ptr);
+ else if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
@@ -230,7 +233,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -243,6 +246,13 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
pgprot_t pgprot;
void *addr;
+ if (i915_gem_object_is_lmem(obj)) {
+ if (type != I915_MAP_WC)
+ return NULL;
+
+ return i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ }
+
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
@@ -288,7 +298,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ if (unlikely(!i915_gem_object_is_mappable(obj)))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
@@ -320,7 +330,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap(ptr);
+ else if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
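Taken together, these changes route lmem objects through the usual pin_map() interface, with two caveats: only I915_MAP_WC is honoured (i915_gem_object_map() returns NULL for any other type), and the underlying i915_gem_object_lmem_io_map() still insists on I915_BO_ALLOC_CONTIGUOUS objects. A hypothetical caller sketch:

	u32 *vaddr;

	/* lmem only supports WC mappings; any other type fails */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* plain CPU access through the WC mapping */
	vaddr[0] = 0xdeadbeaf;

	i915_gem_object_unpin_map(obj);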
@@ -422,7 +422,8 @@ static void shmem_release(struct drm_i915_gem_object *obj)
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_IS_MAPPABLE,
.get_pages = shmem_get_pages,
.put_pages = shmem_put_pages,
@@ -86,7 +86,9 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops huge_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_IS_MAPPABLE,
+
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
@@ -13,8 +13,11 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
static void close_objects(struct list_head *objects)
{
@@ -373,6 +376,83 @@ static int igt_lmem_create(void *arg)
return err;
}
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+	struct intel_context *ce;
+ struct drm_i915_gem_object *obj;
+	I915_RND_STATE(prng);
+ u32 *vaddr;
+ u32 dword;
+ u32 val;
+ u32 sz;
+ int err;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	ce = i915->engine[BCS0]->kernel_context;
+
+	/* Pick a random size up to 32M, but always at least one page */
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+	sz = max_t(u32, PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+		pr_err("Failed to iomap lmem BAR; err=%d\n", (int)PTR_ERR(vaddr));
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ val = prandom_u32_state(&prng);
+
+	/* Write from the GPU and then read from the CPU */
+ err = i915_gem_object_fill_blt(obj, ce, val);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ for (dword = 0; dword < sz / sizeof(u32); ++dword) {
+ if (vaddr[dword] != val) {
+ pr_err("vaddr[%u]=%u, val=%u\n", dword, vaddr[dword],
+ val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+	/* Write from the CPU and read again from the CPU */
+ memset32(vaddr, val ^ 0xdeadbeaf, sz / sizeof(u32));
+
+ for (dword = 0; dword < sz / sizeof(u32); ++dword) {
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("vaddr[%u]=%u, val=%u\n", dword, vaddr[dword],
+ val ^ 0xdeadbeaf);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -414,6 +494,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
};
int err;
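The new subtest is picked up by the live selftest runner. Assuming CONFIG_DRM_I915_SELFTEST is enabled, something like `modprobe i915 live_selftests=-1` (or igt's drv_selftest wrapper) should exercise igt_lmem_write_cpu along with the rest of the live tests.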