@@ -606,7 +606,7 @@ static int i915_gem_object_region_select(struct drm_i915_private *dev_priv,
ret = i915_gem_object_migrate(obj, ce, id);
if (!ret) {
if (MEMORY_TYPE_FROM_REGION(region) ==
- INTEL_LMEM) {
+ DRM_MEM_VRAM) {
/*
* TODO: this should be part of get_pages(),
* when async get_pages arrives
@@ -53,7 +53,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (obj->base.size > resource_size(&mem->region))
+ if (obj->base.size > mem->region.size)
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -2737,20 +2737,20 @@ int i915_gem_init_memory_regions(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(intel_region_map); i++) {
struct intel_memory_region *mem = NULL;
- u32 type;
+ u8 type;
if (!HAS_REGION(i915, BIT(i)))
continue;
type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
switch (type) {
- case INTEL_SMEM:
+ case DRM_MEM_SYSTEM:
mem = i915_gem_shmem_setup(i915);
break;
- case INTEL_STOLEN:
+ case DRM_MEM_STOLEN:
mem = i915_gem_stolen_setup(i915);
break;
- case INTEL_LMEM:
+ case DRM_MEM_VRAM:
mem = i915_gem_setup_fake_lmem(i915);
break;
}
@@ -2762,7 +2762,7 @@ int i915_gem_init_memory_regions(struct drm_i915_private *i915)
}
mem->id = intel_region_map[i];
- mem->type = type;
+ mem->region.type = type;
mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
i915->regions[i] = mem;
@@ -1048,7 +1048,7 @@ i915_error_object_create(struct drm_i915_private *i915,
struct intel_memory_region *mem = vma->obj->memory_region;
for_each_sgt_dma(dma, iter, vma->pages) {
- s = io_mapping_map_atomic_wc(&mem->iomap, dma);
+ s = io_mapping_map_atomic_wc(&mem->region.iomap, dma);
ret = compress_page(compress, s, dst);
io_mapping_unmap_atomic(s);
@@ -184,7 +184,7 @@ static int query_memregion_info(struct drm_i915_private *dev_priv,
continue;
info.id = region->id;
- info.size = resource_size(&region->region);
+ info.size = region->region.size;
if (__copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
@@ -200,7 +200,7 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
int i915_memory_region_init_buddy(struct intel_memory_region *mem)
{
- return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ return i915_buddy_init(&mem->mm, mem->region.size,
mem->min_page_size);
}
@@ -285,10 +285,12 @@ intel_memory_region_create(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
mem->i915 = i915;
- mem->region = (struct resource)DEFINE_RES_MEM(start, size);
- mem->io_start = io_start;
- mem->min_page_size = min_page_size;
mem->ops = ops;
+ /* FIXME drm_mem_region_init? */
+ mem->region.start = start;
+ mem->region.size = size;
+ mem->region.io_start = io_start;
+ mem->min_page_size = min_page_size;
mutex_init(&mem->obj_lock);
INIT_LIST_HEAD(&mem->objects);
@@ -19,14 +19,8 @@ struct intel_memory_region;
struct sg_table;
/**
- * Base memory type
+ * Define supported memory regions
*/
-enum intel_memory_type {
- INTEL_SMEM = 0,
- INTEL_LMEM,
- INTEL_STOLEN,
-};
-
enum intel_region_id {
INTEL_MEMORY_SMEM = 0,
INTEL_MEMORY_LMEM,
@@ -47,9 +41,9 @@ enum intel_region_id {
* Memory regions encoded as type | instance
*/
static const u32 intel_region_map[] = {
- [INTEL_MEMORY_SMEM] = BIT(INTEL_SMEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
- [INTEL_MEMORY_LMEM] = BIT(INTEL_LMEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
- [INTEL_MEMORY_STOLEN] = BIT(INTEL_STOLEN + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+ [INTEL_MEMORY_SMEM] = BIT(DRM_MEM_SYSTEM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+ [INTEL_MEMORY_LMEM] = BIT(DRM_MEM_VRAM + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
+ [INTEL_MEMORY_STOLEN] = BIT(DRM_MEM_STOLEN + INTEL_MEMORY_TYPE_SHIFT) | BIT(0),
};
struct intel_memory_region_ops {
@@ -69,8 +63,7 @@ struct intel_memory_region {
const struct intel_memory_region_ops *ops;
- struct io_mapping iomap;
- struct resource region;
+ struct drm_mem_region region;
/* For faking for lmem */
struct drm_mm_node fake_mappable;
@@ -78,10 +71,8 @@ struct intel_memory_region {
struct i915_buddy_mm mm;
struct mutex mm_lock;
- resource_size_t io_start;
resource_size_t min_page_size;
- unsigned int type;
unsigned int instance;
unsigned int id;
@@ -250,7 +250,7 @@ static int i915_gem_init_fake_lmem_bar(struct intel_memory_region *mem)
int ret;
mem->fake_mappable.start = 0;
- mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.size = mem->region.size;
mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
@@ -277,7 +277,7 @@ static void
region_lmem_release(struct intel_memory_region *mem)
{
i915_gem_relase_fake_lmem_bar(mem);
- io_mapping_fini(&mem->iomap);
+ io_mapping_fini(&mem->region.iomap);
i915_memory_region_release_buddy(mem);
}
@@ -294,14 +294,14 @@ region_lmem_init(struct intel_memory_region *mem)
}
}
- if (!io_mapping_init_wc(&mem->iomap,
- mem->io_start,
- resource_size(&mem->region)))
+ if (!io_mapping_init_wc(&mem->region.iomap,
+ mem->region.io_start,
+ mem->region.size))
return -EIO;
ret = i915_memory_region_init_buddy(mem);
if (ret)
- io_mapping_fini(&mem->iomap);
+ io_mapping_fini(&mem->region.iomap);
return ret;
}
@@ -321,7 +321,7 @@ void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
offset = i915_gem_object_get_dma_address(obj, n);
offset -= intel_graphics_fake_lmem_res.start;
- return io_mapping_map_atomic_wc(&obj->memory_region->iomap, offset);
+ return io_mapping_map_atomic_wc(&obj->memory_region->region.iomap, offset);
}
void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
@@ -335,7 +335,7 @@ void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
offset = i915_gem_object_get_dma_address(obj, n);
offset -= intel_graphics_fake_lmem_res.start;
- return io_mapping_map_wc(&obj->memory_region->iomap, offset, size);
+ return io_mapping_map_wc(&obj->memory_region->region.iomap, offset, size);
}
resource_size_t i915_gem_object_lmem_io_offset(struct drm_i915_gem_object *obj,
@@ -352,14 +352,14 @@ resource_size_t i915_gem_object_lmem_io_offset(struct drm_i915_gem_object *obj,
daddr = i915_gem_object_get_dma_address(obj, n);
daddr -= intel_graphics_fake_lmem_res.start;
- return mem->io_start + daddr;
+ return mem->region.io_start + daddr;
}
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
- struct intel_memory_region *region = obj->memory_region;
+ struct intel_memory_region *mem = obj->memory_region;
- return region && region->type == INTEL_LMEM;
+ return mem && mem->region.type == DRM_MEM_VRAM;
}
struct drm_i915_gem_object *
@@ -395,9 +395,9 @@ i915_gem_setup_fake_lmem(struct drm_i915_private *i915)
io_start,
&region_lmem_ops);
if (!IS_ERR(mem)) {
- DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
+ DRM_INFO("Intel graphics fake LMEM: %pR\n", mem);
DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
- (u64)mem->io_start);
+ (u64)mem->region.io_start);
}
return mem;
@@ -32,7 +32,7 @@ static void close_objects(struct list_head *objects)
static int igt_mock_fill(void *arg)
{
struct intel_memory_region *mem = arg;
- resource_size_t total = resource_size(&mem->region);
+ resource_size_t total = mem->region.size;
resource_size_t page_size;
resource_size_t rem;
unsigned long max_pages;
@@ -98,7 +98,7 @@ static int igt_frag_region(struct intel_memory_region *mem,
int err = 0;
target = mem->mm.min_size;
- total = resource_size(&mem->region);
+ total = mem->region.size;
n_objects = total / target;
while (n_objects--) {
@@ -152,7 +152,7 @@ static int igt_mock_evict(void *arg)
if (err)
return err;
- total = resource_size(&mem->region);
+ total = mem->region.size;
target = mem->mm.min_size;
while (target <= total / 2) {
@@ -198,7 +198,7 @@ static int igt_mock_continuous(void *arg)
if (err)
return err;
- total = resource_size(&mem->region);
+ total = mem->region.size;
target = total / 2;
/*
Some fields are deleted from intel_memory_region in favor of using the
new nested drm_mem_region structure.

Note: this is based upon the unmerged i915 series [1] in order to show
how i915 might begin to integrate the proposed drm_mem_region.

[1] https://lists.freedesktop.org/archives/intel-gfx/2019-June/203649.html

Signed-off-by: Brian Welty <brian.welty@intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.c    |  2 +-
drivers/gpu/drm/i915/gem/i915_gem_shmem.c     |  2 +-
drivers/gpu/drm/i915/i915_gem_gtt.c           | 10 +++----
drivers/gpu/drm/i915/i915_gpu_error.c         |  2 +-
drivers/gpu/drm/i915/i915_query.c             |  2 +-
drivers/gpu/drm/i915/intel_memory_region.c    | 10 ++++---
drivers/gpu/drm/i915/intel_memory_region.h    | 19 ++++----------
drivers/gpu/drm/i915/intel_region_lmem.c      | 26 +++++++++----------
.../drm/i915/selftests/intel_memory_region.c  |  8 +++---
9 files changed, 37 insertions(+), 44 deletions(-)
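For readers without the drm_mem_region RFC applied, the nested structure
this patch relies on looks roughly like the sketch below. The field names
(start, size, io_start, iomap, type) and the DRM_MEM_SYSTEM /
DRM_MEM_STOLEN / DRM_MEM_VRAM type names are inferred from the hunks
above; the exact layout and the numeric type values are defined by the
RFC itself and may differ, so treat this purely as orientation:

#include <linux/io-mapping.h>
#include <linux/types.h>

/*
 * Sketch only: approximates the drm_mem_region fields this patch
 * touches (mem->region.start/size/io_start/iomap/type). The real
 * definition comes from the separate drm_mem_region RFC.
 */
struct drm_mem_region {
        resource_size_t start;          /* offset in the device address space */
        resource_size_t io_start;       /* CPU-visible (BAR) offset, was mem->io_start */
        resource_size_t size;           /* size in bytes, was resource_size(&mem->region) */
        struct io_mapping iomap;        /* WC mapping, was mem->iomap */
        u8 type;                        /* DRM_MEM_*, was mem->type (enum intel_memory_type) */
};

Because io_start and the iomap move into the nested region, the
io_mapping_* call sites above only need the &mem->iomap to
&mem->region.iomap rename, while the i915-specific bookkeeping (buddy
allocator, object list, min_page_size, instance, id) stays in
intel_memory_region itself.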