@@ -77,7 +77,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
case I915_GEM_DOMAIN_RENDER:
if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
+ obj->cache_dirty = !IS_DGFX(to_i915(obj->base.dev));
break;
}
@@ -275,7 +275,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
/* Always invalidate stale cachelines */
if (obj->cache_level != cache_level) {
i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true;
+ obj->cache_dirty = !IS_DGFX(to_i915(obj->base.dev));
}
/* The cache-level will be applied when each vma is rebound. */
When i915_gem_object_set_cache_level sets the GEM object's cache_dirty to true, in the case of TTM that will sometimes be overwritten when getting the object's pages — more specifically, for shmem-placed objects whose ttm structure has just been populated. This wasn't an issue so far, even though intel_dpt_create was setting the object's cache level to 'none' regardless of the platform and the memory placement of the framebuffer. However, commit b6f17c183a3e ("drm/i915/ttm: dont trample cache_level overrides during ttm move") makes sure the cache level set by older backends soon to be managed by TTM isn't altered after their TTM bo's ttm structure is populated. As a result, the 'obj->cache_dirty = true' set in i915_gem_object_set_cache_level and flush_write_domain now sticks around instead of being reset inside i915_ttm_adjust_gem_after_move after the call to ttm_tt_populate in __i915_ttm_get_pages, which eventually triggered a warning on DGFX platforms. Fix this by never setting cache_dirty on DGFX platforms. Signed-off-by: Adrian Larumbe <adrian.larumbe@collabora.com> --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)