Message ID | 1375315222-4785-29-git-send-email-ben@bwidawsk.net (mailing list archive) |
---|---|
State | New, archived |
On Wed, Jul 31, 2013 at 05:00:21PM -0700, Ben Widawsky wrote:
> Building on the last patch which created the new function pointers in
> the VM for bind/unbind, here we actually put those new function pointers
> to use.
>
> Split out as a separate patch to aid in review. I'm fine with squashing
> into the previous patch if people request it.
>
> v2: Updated to address the smart ggtt which can do aliasing as needed.
> Make sure we bind to global gtt when mappable and fenceable. I thought
> we could get away without this initially, but we cannot.
>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>

I don't like how this is split, since it's a (small but still) flip-the-world approach: first you create completely new code, then you rip out the old one and switch over. So this should definitely be squashed for easier review, and if it's too big, it should be split up into different refactoring steps, where each step keeps all the code working while slowly transforming it.

I'll punt on this for now.
-Daniel
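The diff below switches every call site from the object-level helpers (i915_gem_gtt_bind_object(), i915_ppgtt_bind_object(), and friends) to function pointers carried by the VM itself. As a rough illustration of that dispatch pattern, here is a minimal standalone sketch in plain userspace C; the struct names and fields are simplified stand-ins for the driver's real types, not kernel code:

```c
/*
 * Sketch of the per-VM bind/unbind dispatch this patch adopts.
 * "address_space" and "vma" are simplified stand-ins, not the
 * driver's i915_address_space / i915_vma.
 */
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC };
#define GLOBAL_BIND (1 << 0)

struct vma;

struct address_space {
	const char *name;
	/* Per-VM hooks; the GGTT and each PPGTT install their own. */
	void (*bind_vma)(struct vma *vma, enum cache_level level,
			 unsigned flags);
	void (*unbind_vma)(struct vma *vma);
};

struct vma {
	struct address_space *vm;
	unsigned long start;
};

static void ggtt_bind_vma(struct vma *vma, enum cache_level level,
			  unsigned flags)
{
	printf("bind into %s at 0x%lx (cache %d, flags 0x%x)\n",
	       vma->vm->name, vma->start, level, flags);
}

static void ggtt_unbind_vma(struct vma *vma)
{
	printf("unbind from %s\n", vma->vm->name);
}

int main(void)
{
	struct address_space ggtt = { "ggtt", ggtt_bind_vma, ggtt_unbind_vma };
	struct vma vma = { &ggtt, 0x10000 };

	/* Callers dispatch through the VM without caring whether it is
	 * the global GTT or a PPGTT. */
	vma.vm->bind_vma(&vma, CACHE_LLC, GLOBAL_BIND);
	vma.vm->unbind_vma(&vma);
	return 0;
}
```

The payoff of the indirection is visible at the call sites in the diff: i915_vma_unbind() collapses to a single vma->vm->unbind_vma(vma), and callers no longer special-case the aliasing PPGTT.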
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2849297..a9c3110 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1938,17 +1938,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_gtt.c */
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24c1a91..1f35ae4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2631,12 +2631,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	trace_i915_vma_unbind(vma);
 
-	if (obj->has_global_gtt_mapping && i915_is_ggtt(vma->vm))
-		i915_gem_gtt_unbind_object(obj);
-	if (obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-		obj->has_aliasing_ppgtt_mapping = 0;
-	}
+	vma->vm->unbind_vma(vma);
+
 	i915_gem_gtt_finish_object(obj);
 	i915_gem_object_unpin_pages(obj);
 
@@ -3354,7 +3350,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3393,11 +3388,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return ret;
 		}
 
-		if (obj->has_global_gtt_mapping)
-			i915_gem_gtt_bind_object(obj, cache_level);
-		if (obj->has_aliasing_ppgtt_mapping)
-			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-					       obj, cache_level);
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			vma->vm->bind_vma(vma, cache_level, 0);
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3676,6 +3668,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    bool map_and_fenceable,
 		    bool nonblocking)
 {
+	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3704,20 +3697,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
 						 map_and_fenceable,
 						 nonblocking);
 		if (ret)
 			return ret;
 
-		if (!dev_priv->mm.aliasing_ppgtt)
-			i915_gem_gtt_bind_object(obj, obj->cache_level);
-	}
+		vma = i915_gem_obj_to_vma(obj, vm);
+		vm->bind_vma(vma, obj->cache_level, flags);
+	} else
+		vma = i915_gem_obj_to_vma(obj, vm);
 
+	/* Objects are created map and fenceable. If we bind an object
+	 * the first time, and we had aliasing PPGTT (and didn't request
+	 * GLOBAL), we'll need to do this on the second bind.*/
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		vm->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
 	obj->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 147399c..10a5618 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -391,6 +391,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 static int do_switch(struct i915_hw_context *to)
 {
 	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	int ret;
@@ -415,8 +416,11 @@ static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
-	if (!to->obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+	if (!to->obj->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+							   &dev_priv->gtt.base);
+		vma->vm->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+	}
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b8bb7f5..4719e74 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -230,8 +230,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 	    !target_i915_obj->has_global_gtt_mapping)) {
-		i915_gem_gtt_bind_object(target_i915_obj,
-					 target_i915_obj->cache_level);
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+		vma->vm->bind_vma(vma, target_i915_obj->cache_level,
+				  GLOBAL_BIND);
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
@@ -434,11 +435,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				struct intel_ring_buffer *ring,
 				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
-	struct drm_i915_gem_object *obj = vma->obj;
+	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	int ret;
 
 	need_fence =
@@ -467,14 +469,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		}
 	}
 
-	/* Ensure ppgtt mapping exists if needed */
-	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-				       obj, obj->cache_level);
-
-		obj->has_aliasing_ppgtt_mapping = 1;
-	}
-
 	if (entry->offset != vma->node.start) {
 		entry->offset = vma->node.start;
 		*need_reloc = true;
@@ -485,9 +479,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
-	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-	    !obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	vma->vm->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;
 }
@@ -1077,8 +1069,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but let's be paranoid and do it
 	 * unconditionally for now. */
-	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+	if (flags & I915_DISPATCH_SECURE &&
+	    !batch_obj->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(batch_obj, vm);
+		vm->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+	}
 
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 39ac266..74b5077 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -412,15 +412,6 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 	dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level)
-{
-	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
-				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-				   cache_level);
-}
-
 static void __always_unused
 gen6_ppgtt_bind_vma(struct i915_vma *vma,
 		    enum i915_cache_level cache_level,
@@ -433,14 +424,6 @@ gen6_ppgtt_bind_vma(struct i915_vma *vma,
 	gen6_ppgtt_insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
 }
 
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj)
-{
-	ppgtt->base.clear_range(&ppgtt->base,
-				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-				obj->base.size >> PAGE_SHIFT);
-}
-
 static void __always_unused gen6_ppgtt_unbind_vma(struct i915_vma *vma)
 {
 	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
@@ -501,8 +484,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+							   &dev_priv->gtt.base);
 		i915_gem_clflush_object(obj);
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+		vma->vm->bind_vma(vma, obj->cache_level, 0);
 	}
 
 	i915_gem_chipset_flush(dev);
@@ -658,33 +643,6 @@ static void gen6_ggtt_bind_vma(struct i915_vma *vma,
 	}
 }
 
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-					  entry,
-					  cache_level);
-
-	obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-				       entry,
-				       obj->base.size >> PAGE_SHIFT);
-
-	obj->has_global_gtt_mapping = 0;
-}
-
 static void gen6_ggtt_unbind_vma(struct i915_vma *vma)
 {
 	struct drm_device *dev = vma->vm->dev;
Building on the last patch which created the new function pointers in
the VM for bind/unbind, here we actually put those new function pointers
to use.

Split out as a separate patch to aid in review. I'm fine with squashing
into the previous patch if people request it.

v2: Updated to address the smart ggtt which can do aliasing as needed.
Make sure we bind to global gtt when mappable and fenceable. I thought
we could get away without this initially, but we cannot.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            |  9 ------
 drivers/gpu/drm/i915/i915_gem.c            | 31 ++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  8 +++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 29 ++++++++----------
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 48 ++----------------------------
 5 files changed, 34 insertions(+), 91 deletions(-)
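The v2 note ("bind to global gtt when mappable and fenceable") corresponds to two spots in the pin path of this patch: the flags chosen for a first bind, and a fix-up bind for objects that were first bound without GLOBAL. A toy sketch of that decision logic, in plain C with booleans standing in for the real object state:

```c
#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)

/* Mirrors the pin path in the diff: a mappable+fenceable pin requests
 * a global-GTT binding up front. */
static unsigned int pin_flags(bool map_and_fenceable)
{
	return map_and_fenceable ? GLOBAL_BIND : 0;
}

/* An object first bound without GLOBAL (e.g. only into the aliasing
 * PPGTT) gets a second bind_vma() with GLOBAL_BIND on a later
 * mappable pin. */
static bool needs_global_rebind(bool has_global_gtt_mapping,
				bool map_and_fenceable)
{
	return !has_global_gtt_mapping && map_and_fenceable;
}

int main(void)
{
	printf("first pin flags: 0x%x\n", pin_flags(true));
	printf("rebind needed: %d\n", needs_global_rebind(false, true));
	return 0;
}
```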