Message ID | 1375315222-4785-11-git-send-email-ben@bwidawsk.net (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Wed, Jul 31, 2013 at 05:00:03PM -0700, Ben Widawsky wrote: > For now, objects will maintain the same cache levels amongst all address > spaces. This is to limit the risk of bugs, as playing with cacheability > in the different domains can be very error prone. > > In the future, it may be optimal to allow setting domains per VMA (ie. > an object bound into an address space). > > Signed-off-by: Ben Widawsky <ben@bwidawsk.net> > --- > drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++++-------- > 1 file changed, 12 insertions(+), 8 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index 3ce9d0d..adb0a18 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -3308,7 +3308,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > { > struct drm_device *dev = obj->base.dev; > drm_i915_private_t *dev_priv = dev->dev_private; > - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); > + struct i915_vma *vma; > int ret; > > if (obj->cache_level == cache_level) > @@ -3319,13 +3319,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > return -EBUSY; > } > > - if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { > - ret = i915_gem_object_unbind(obj); > - if (ret) > - return ret; > + list_for_each_entry(vma, &obj->vma_list, vma_link) { > + if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { > + ret = i915_gem_object_unbind(obj); > + if (ret) > + return ret; > + > + break; > + } > } > > - if (i915_gem_obj_ggtt_bound(obj)) { > + if (i915_gem_obj_bound_any(obj)) { Hm, I guess this will change later on to a for_each_vma loop? Patch applied meanwhile. 
-Daniel > ret = i915_gem_object_finish_gpu(obj); > if (ret) > return ret; > @@ -3347,8 +3351,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > if (obj->has_aliasing_ppgtt_mapping) > i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, > obj, cache_level); > - > - i915_gem_obj_to_vma(obj, &dev_priv->gtt.base)->node.color = cache_level; > } > > if (cache_level == I915_CACHE_NONE) { > @@ -3374,6 +3376,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > old_write_domain); > } > > + list_for_each_entry(vma, &obj->vma_list, vma_link) > + vma->node.color = cache_level; > obj->cache_level = cache_level; > i915_gem_verify_gtt(dev); > return 0; > -- > 1.8.3.4 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3ce9d0d..adb0a18 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3308,7 +3308,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); + struct i915_vma *vma; int ret; if (obj->cache_level == cache_level) @@ -3319,13 +3319,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + + break; + } } - if (i915_gem_obj_ggtt_bound(obj)) { + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -3347,8 +3351,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - - i915_gem_obj_to_vma(obj, &dev_priv->gtt.base)->node.color = cache_level; } if (cache_level == I915_CACHE_NONE) { @@ -3374,6 +3376,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, old_write_domain); } + list_for_each_entry(vma, &obj->vma_list, vma_link) + vma->node.color = cache_level; obj->cache_level = cache_level; i915_gem_verify_gtt(dev); return 0;
For now, objects will maintain the same cache levels amongst all address spaces. This is to limit the risk of bugs, as playing with cacheability in the different domains can be very error prone. In the future, it may be optimal to allow setting domains per VMA (i.e. an object bound into an address space). Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-)