@@ -3243,7 +3243,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
-	bool flush;
 
 	i915_check_and_clear_faults(dev);
 
@@ -3251,21 +3250,29 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
 			       true);
 
+	/* Coming from a hibernation image, the pages will have
+	 * been written to by the cpu.
+	 */
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
 	/* Cache flush objects bound into GGTT and rebind them. */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		flush = false;
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
 				continue;
 
 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
 					      PIN_UPDATE));
-
-			flush = true;
 		}
 
-		if (flush)
-			i915_gem_clflush_object(obj, obj->pin_display);
+		if (obj->pin_display)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
 	if (INTEL_INFO(dev)->gen >= 8) {
During hibernation, all objects will have had their page contents
written to disk and then restored upon resume. This means that every
page will be dirty and we need to treat all objects as being in the CPU
domain and require their contents to be flushed before use. At present
we only do so for those objects bound into the Global GTT, but we also
need to mark all allocated objects as being unclean.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
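For reference, below is a minimal sketch of how i915_gem_restore_gtt_mappings() reads
with the patch applied, reconstructed from the hunks above. The dev_priv declaration
and the body of the gen >= 8 branch are not visible in the diff, so those parts are
assumptions drawn from the hunk context rather than part of this change:

/* Sketch: i915_gem_restore_gtt_mappings() after this patch. Reconstructed
 * from the diff context above; dev_priv's declaration and the gen >= 8
 * tail are assumed, not shown by the diff.
 */
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);	/* assumed */
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	i915_check_and_clear_faults(dev);

	/* Scrub our portion of the GGTT before rebinding anything. */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
			       true);

	/* Every page was rewritten by the CPU when the hibernation image
	 * was restored, so even unbound objects are now CPU-dirty.
	 */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* Mark bound objects CPU-dirty as well, rebind them into the GGTT,
	 * and move pinned scanout objects back to the GTT domain.
	 */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
		}

		if (obj->pin_display)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		/* ... existing PTE/PPAT reprogramming continues here ... */
	}
}

The idea is that non-display objects are simply left marked as CPU-dirty here and get
flushed through the normal domain-management path the next time they are used, while
pinned scanouts are moved to the GTT domain immediately via
i915_gem_object_set_to_gtt_domain() instead of a bare clflush, keeping their domain
tracking consistent with the flush.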