@@ -2083,17 +2083,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj);
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@ -2696,12 +2696,8 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_unbind_object(obj);
- if (obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
- obj->has_aliasing_ppgtt_mapping = 0;
- }
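+ /* The VMA's address space knows how to tear down its own PTEs
+ * (global GTT and/or aliasing PPGTT), so defer to its unbind_vma. */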
+ vma->vm->unbind_vma(vma);
+
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
@@ -3428,7 +3424,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma;
int ret;
@@ -3467,11 +3462,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, cache_level);
- if (obj->has_aliasing_ppgtt_mapping)
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, cache_level);
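+ /* Rewrite every existing mapping of this object with the new cache
+ * level; each VMA's address space performs the PTE update. */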
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ vma->vm->bind_vma(vma, cache_level,
+ obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3799,6 +3791,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
bool map_and_fenceable,
bool nonblocking)
{
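+ /* A map_and_fenceable pin must end up in the mappable global GTT, so
+ * ask for GLOBAL_BIND on the initial bind below. */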
+ const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
struct i915_vma *vma;
int ret;
@@ -3827,20 +3820,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (!i915_gem_obj_bound(obj, vm)) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
- }
+ vma = i915_gem_obj_to_vma(obj, vm);
+ vm->bind_vma(vma, obj->cache_level, flags);
+ } else {
+ vma = i915_gem_obj_to_vma(obj, vm);
+ }
+
+ /* If the initial bind went only into the aliasing PPGTT (GLOBAL_BIND
+ * was not requested), a later map_and_fenceable pin still needs to
+ * add the global GTT mapping; the check below handles that. */
if (!obj->has_global_gtt_mapping && map_and_fenceable)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ vm->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
@@ -391,6 +391,7 @@ mi_set_context(struct intel_ring_buffer *ring,
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
@@ -415,8 +416,11 @@ static int do_switch(struct i915_hw_context *to)
return ret;
}
- if (!to->obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
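+ /* The context object is accessed by the GPU via the global GTT, so
+ * make sure it has a global binding before we switch to it. */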
+ if (!to->obj->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ &dev_priv->gtt.base);
+ vma->vm->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ }
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -286,8 +286,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
+ struct i915_vma *vma = i915_gem_obj_to_vma(target_i915_obj, vm);
+ vma->vm->bind_vma(vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -464,11 +465,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
bool *need_reloc)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
- struct drm_i915_gem_object *obj = vma->obj;
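+ /* EXEC_OBJECT_NEEDS_GTT forces a global GTT binding; only request it
+ * when the object does not already have one. */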
+ u32 flags = ((entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+ !vma->obj->has_global_gtt_mapping) ? GLOBAL_BIND : 0;
int ret;
need_fence =
@@ -497,14 +499,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}
}
- /* Ensure ppgtt mapping exists if needed */
- if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
- i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
- obj, obj->cache_level);
-
- obj->has_aliasing_ppgtt_mapping = 1;
- }
-
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
@@ -515,9 +509,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
- !obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ vma->vm->bind_vma(vma, obj->cache_level, flags);
return 0;
}
@@ -1117,8 +1109,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
- if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+ if (flags & I915_DISPATCH_SECURE &&
+ !batch_obj->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(batch_obj, vm);
+ vm->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+ }
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
@@ -437,15 +437,6 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
dev_priv->mm.aliasing_ppgtt = NULL;
}
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- cache_level);
-}
-
static void __always_unused
gen6_ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
@@ -458,14 +449,6 @@ gen6_ppgtt_bind_vma(struct i915_vma *vma,
gen6_ppgtt_insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
}
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_object *obj)
-{
- ppgtt->base.clear_range(&ppgtt->base,
- i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-}
-
static void __always_unused gen6_ppgtt_unbind_vma(struct i915_vma *vma)
{
const unsigned long entry = vma->node.start >> PAGE_SHIFT;
@@ -523,8 +506,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
i915_gem_clflush_object(obj, obj->pin_display);
- i915_gem_gtt_bind_object(obj, obj->cache_level);
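+ /* The global GTT PTEs were cleared above, so rewrite the mapping for
+ * every object that was bound before suspend. */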
+ vma->vm->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
i915_gem_chipset_flush(dev);
@@ -677,33 +662,6 @@ static void gen6_ggtt_bind_vma(struct i915_vma *vma,
}
}
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
- dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
- entry,
- cache_level);
-
- obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- entry,
- obj->base.size >> PAGE_SHIFT);
-
- obj->has_global_gtt_mapping = 0;
-}
-
static void gen6_ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_device *dev = vma->vm->dev;