@@ -235,6 +235,8 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
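+	/* A stale GTT write domain here would leak its wakeref. */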
+ GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
@@ -667,11 +668,14 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
-static void
+void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
if (!(obj->base.write_domain & flush_domains))
return;
@@ -695,15 +698,17 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
- intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
}
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
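+		/* Drop the wakeref taken when the write domain became GTT;
+		 * it also kept the device awake for the posting read above.
+		 */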
+ intel_runtime_pm_put(dev_priv);
break;
case I915_GEM_DOMAIN_CPU:
@@ -3425,6 +3427,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
+ GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
obj->base.write_domain = 0;
}
@@ -3555,6 +3558,12 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
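+		/* Keep the device awake whilst the GTT write domain is
+		 * dirty; the wakeref is dropped again when the domain is
+		 * flushed. If the domain is already GTT, a wakeref is held.
+		 */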
+		if (!(obj->base.write_domain & I915_GEM_DOMAIN_GTT))
+			intel_runtime_pm_get_noresume(to_i915(obj->base.dev));
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
@@ -4394,6 +4398,11 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
trace_i915_gem_object_destroy(obj);
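+	/* Flush any outstanding GTT writes, dropping the wakeref that
+	 * accompanies a dirty GTT domain, before the object is freed.
+	 */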
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) {
@@ -1865,6 +1865,11 @@ void i915_vma_move_to_active(struct i915_vma *vma,
i915_gem_active_set(&vma->last_read[idx], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
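+	/* The write domain is cleared below; if it was GTT, release
+	 * the wakeref that was taken when the domain was set.
+	 */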
+ if (obj->base.write_domain & I915_GEM_DOMAIN_GTT)
+ intel_runtime_pm_put(to_i915(obj->base.dev));
obj->base.write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
@@ -421,5 +421,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+void flush_write_domain(struct drm_i915_gem_object *obj,
+			unsigned int flush_domains);
+
#endif
@@ -127,8 +127,11 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
- if (i915_gem_object_unbind(obj) == 0)
+ if (i915_gem_object_unbind(obj) == 0) {
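+		/* Drop any wakeref held for a dirty GTT domain first. */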
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ }
return !READ_ONCE(obj->mm.pages);
}
@@ -866,6 +866,10 @@ execlists_context_pin(struct intel_engine_cs *engine,
i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true;
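+	/* The context may remain pinned indefinitely; make sure no
+	 * write-domain wakeref is held for it whilst it is bound.
+	 */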
+ flush_write_domain(ce->state->obj, ~0);
i915_gem_context_get(ctx);
out:
@@ -1321,6 +1321,11 @@ int intel_ring_pin(struct intel_ring *ring,
if (IS_ERR(addr))
goto err;
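+	/* Flush any stale write domain, releasing the wakeref that may
+	 * accompany a dirty GTT domain, before the ring is used.
+	 */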
+ flush_write_domain(vma->obj, ~0);
+
ring->vaddr = addr;
return 0;
@@ -1516,6 +1518,8 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
goto err;
ce->state->obj->mm.dirty = true;
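+		/* Ensure no write-domain wakeref remains held for the context. */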
+ flush_write_domain(ce->state->obj, ~0);
}
/* The kernel context is only used as a placeholder for flushing the