@@ -2088,6 +2088,8 @@ int i915_gem_find_vm_space_generic(struct i915_address_space *vm,
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
+#define PIN_ALIASING 0x8
+#define PIN_GLOBAL_ALIASED (PIN_ALIASING | PIN_GLOBAL)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
@@ -2314,7 +2316,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
- return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL_ALIASED);
}
static inline int
@@ -3265,8 +3265,12 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
size_t gtt_max =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
+ u32 vma_bind_flags = 0;
int ret;
+ if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+ flags |= PIN_GLOBAL;
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3342,8 +3346,10 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
- i915_gem_vma_bind(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ if (flags & PIN_GLOBAL_ALIASED)
+ vma_bind_flags = GLOBAL_BIND | ALIASING_BIND;
+
+ i915_gem_vma_bind(vma, obj->cache_level, vma_bind_flags);
i915_gem_verify_gtt(dev);
return vma;
@@ -3546,9 +3552,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
- if (drm_mm_node_allocated(&vma->node))
- i915_gem_vma_bind(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ if (drm_mm_node_allocated(&vma->node)) {
+ u32 bind_flags = 0;
+ if (obj->has_global_gtt_mapping)
+ bind_flags |= GLOBAL_BIND;
+ if (obj->has_aliasing_ppgtt_mapping)
+ bind_flags |= ALIASING_BIND;
+ i915_gem_vma_bind(vma, cache_level, bind_flags);
+ }
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3884,7 +3895,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
return -ENODEV;
- if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
+ if (WARN_ON(flags & (PIN_GLOBAL_ALIASED | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
vma = i915_gem_obj_to_vma(obj, vm);
@@ -3916,8 +3927,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return PTR_ERR(vma);
}
- if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
- i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ if (flags & PIN_GLOBAL_ALIASED) {
+ u32 bind_flags = 0;
+ if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+ bind_flags |= GLOBAL_BIND;
+ if (flags & PIN_ALIASING && !obj->has_aliasing_ppgtt_mapping)
+ bind_flags |= ALIASING_BIND;
+ i915_gem_vma_bind(vma, obj->cache_level, bind_flags);
+ }
vma->pin_count++;
if (flags & PIN_MAPPABLE)
@@ -370,7 +370,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
list_first_entry(&target_i915_obj->vma_list,
typeof(*vma), vma_link);
i915_gem_vma_bind(vma, target_i915_obj->cache_level,
- GLOBAL_BIND);
+ GLOBAL_BIND | ALIASING_BIND);
}
/* Validate that the target is in a valid r/w GPU domain */
@@ -558,6 +558,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
if (need_fence || need_reloc_mappable(vma))
flags |= PIN_MAPPABLE;
+ /* FIXME: decide whether EXEC_OBJECT_NEEDS_GTT should request a global, an aliasing, or both kinds of bind */
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
@@ -1267,7 +1268,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* allocate space first */
struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
BUG_ON(!vma);
- i915_gem_vma_bind(vma, batch_obj->cache_level, GLOBAL_BIND);
+ /* FIXME: Current secure dispatch code actually uses PPGTT. We
+ * need to fix this eventually */
+ i915_gem_vma_bind(vma, batch_obj->cache_level,
+ GLOBAL_BIND | ALIASING_BIND);
}
if (flags & I915_DISPATCH_SECURE)
@@ -1318,8 +1318,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
- obj->has_global_gtt_mapping = 0;
- i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ if (obj->has_global_gtt_mapping || obj->has_aliasing_ppgtt_mapping) {
+ u32 bind_flags = 0;
+ if (obj->has_global_gtt_mapping)
+ bind_flags |= GLOBAL_BIND;
+ if (obj->has_aliasing_ppgtt_mapping)
+ bind_flags |= ALIASING_BIND;
+ obj->has_global_gtt_mapping = 0;
+ obj->has_aliasing_ppgtt_mapping = 0;
+ i915_gem_vma_bind(vma, obj->cache_level, bind_flags);
+ }
}
if (INTEL_INFO(dev)->gen >= 8) {
@@ -152,7 +152,11 @@ struct i915_vma {
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
+
+/* Only use this if you know you want a strictly global binding */
#define GLOBAL_BIND (1<<0)
+/* Only use this if you know you want a strictly aliased binding */
+#define ALIASING_BIND (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
The driver currently lets callers pin globally, and then tries to do the right thing inside the function. Doing so has two downsides: 1. It's not possible to pin exclusively to either the global or the aliasing address space. 2. It's difficult to read and understand. The eventual goal, once realized, should fix both of these issues. This patch, which should have no functional impact, begins to address them without intentionally breaking anything. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_drv.h | 4 +++- drivers/gpu/drm/i915/i915_gem.c | 33 ++++++++++++++++++++++-------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 8 ++++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++++++++-- drivers/gpu/drm/i915/i915_gem_gtt.h | 4 ++++ 5 files changed, 48 insertions(+), 13 deletions(-)