diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -162,8 +162,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
info->size << PAGE_SHIFT);
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -149,8 +149,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
- obj->base.read_domains,
- obj->base.write_domain,
+ obj->read_domains,
+ obj->write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
if (needs_clflush &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
@@ -704,10 +704,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
- if (!(obj->base.write_domain & flush_domains))
+ if (!(obj->write_domain & flush_domains))
return;
- switch (obj->base.write_domain) {
+ switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_flush_ggtt_writes(dev_priv);
@@ -732,7 +732,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
break;
}
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
static inline int
@@ -832,7 +832,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!obj->cache_dirty &&
- !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
@@ -891,7 +891,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
@@ -2392,8 +2392,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -3614,7 +3614,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3651,7 +3651,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3672,17 +3672,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* WC domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_WC;
- obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
@@ -3714,7 +3714,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3735,17 +3735,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
@@ -4057,7 +4057,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
@@ -4110,15 +4110,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -4554,8 +4554,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
@@ -5601,7 +5601,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
if (IS_ERR(obj))
return obj;
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
} else {
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
obj->cache_dirty = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* write-combined buffer or a delay through the chipset for GTT
* writes that do require us to treat GTT as a separate cache domain.)
*/
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
return &obj->base;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1073,7 +1073,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
@@ -1861,16 +1861,16 @@ void i915_vma_move_to_active(struct i915_vma *vma,
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
i915_gem_active_set(&obj->frontbuffer_write, req);
- obj->base.read_domains = 0;
+ obj->read_domains = 0;
}
- obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_gem_active_set(&vma->last_fence, req);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -197,8 +197,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -269,6 +269,21 @@ struct drm_i915_gem_object {
struct drm_dma_handle *phys_handle;
struct reservation_object __builtin_resv;
+
+ /**
+ * @read_domains:
+ *
+ * Read memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated.
+ */
+ uint32_t read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ uint32_t write_domain;
};
static inline struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -516,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -796,8 +796,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -997,8 +997,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
+ err->read_domains = obj->read_domains;
+ err->write_domain = obj->write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->mm.dirty;
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->scratch = phys_size;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -178,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &huge_page_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
obj->mm.page_mask = page_mask;
@@ -329,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
else
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
return obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -215,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
}
i915_gem_obj_finish_shmem_access(obj);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->write_domain = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -115,21 +115,6 @@ struct drm_gem_object {
*/
int name;
- /**
- * @read_domains:
- *
- * Read memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated.
- */
- uint32_t read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- uint32_t write_domain;
-
/**
* @dma_buf:
*
i915 is the only driver using those fields in the drm_gem_object
structure, so they only waste memory for all other drivers.

Move the fields into drm_i915_gem_object instead and patch the i915
code with the following sed commands:

sed -i "s/obj->base.read_domains/obj->read_domains/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c
sed -i "s/obj->base.write_domain/obj->write_domain/g" drivers/gpu/drm/i915/*.c drivers/gpu/drm/i915/*/*.c

Change is only compile tested.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/i915/gvt/dmabuf.c                 |  4 +-
 drivers/gpu/drm/i915/i915_debugfs.c               |  4 +-
 drivers/gpu/drm/i915/i915_gem.c                   | 60 +++++++++++------------
 drivers/gpu/drm/i915/i915_gem_clflush.c           |  2 +-
 drivers/gpu/drm/i915/i915_gem_dmabuf.c            |  4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c        | 10 ++--
 drivers/gpu/drm/i915/i915_gem_internal.c          |  4 +-
 drivers/gpu/drm/i915/i915_gem_object.h            | 15 ++++++
 drivers/gpu/drm/i915/i915_gem_stolen.c            |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c           |  4 +-
 drivers/gpu/drm/i915/i915_gpu_error.c             |  4 +-
 drivers/gpu/drm/i915/selftests/huge_gem_object.c  |  4 +-
 drivers/gpu/drm/i915/selftests/huge_pages.c       |  8 +--
 drivers/gpu/drm/i915/selftests/i915_gem_context.c |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c     |  4 +-
 include/drm/drm_gem.h                             | 15 ------
 16 files changed, 74 insertions(+), 74 deletions(-)
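Since the conversion is purely mechanical, a quick way to audit a tree
with this applied is to grep for leftover users of the old fields. This
is only a suggested spot-check, not part of the patch; the git grep
invocations below are illustrative and assume the two sed runs above
have already been done:

	# Both greps should come back empty: i915 must no longer reach
	# the fields through obj->base, and the fields must be gone from
	# the common GEM header.
	git grep -nE "base\.(read_domains|write_domain)" -- drivers/gpu/drm/i915
	git grep -nE "read_domains|write_domain" -- include/drm/drm_gem.h

Anything the first grep still reports would be a user that the sed
globs (which only cover *.c one and two directories deep) missed.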