| Message ID | 1444285469-8187-7-git-send-email-ankitprasad.r.sharma@intel.com (mailing list archive) |
|---|---|
| State | New, archived |
On Thu, Oct 08, 2015 at 11:54:29AM +0530, ankitprasad.r.sharma@intel.com wrote:
> +	/* stolen objects are already pinned to prevent shrinkage */
> +	memset(&node, 0, sizeof(node));
> +	ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
> +						  &node,
> +						  4096, 0, I915_CACHE_NONE,
> +						  0, i915->gtt.mappable_end,
> +						  DRM_MM_SEARCH_DEFAULT,
> +						  DRM_MM_CREATE_DEFAULT);
> +	if (ret)
> +		return ret;
> +
> +	i915->gtt.base.insert_entries(&i915->gtt.base, obj->pages,
> +				      node.start, I915_CACHE_NONE, 0);

This was written using an insert_page() function you don't have. Either
grab that as well, or you need to pin the entire object into the GGTT,
i.e. i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); note that to do so
you will also need to be very careful to handle the pinning of
obj->pages and the introduction of a new GGTT vma.

> +
> +	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> +		struct page *page;
> +		void __iomem *src;
> +		void *dst;
> +
> +		page = shmem_read_mapping_page(mapping, i);
> +		if (IS_ERR(page)) {
> +			ret = PTR_ERR(page);
> +			goto err_node;
> +		}
> +
> +		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start + PAGE_SIZE * i);
> +		dst = kmap_atomic(page);
> +		memcpy_fromio(dst, src, PAGE_SIZE);
> +		kunmap_atomic(dst);
> +		io_mapping_unmap_atomic(src);
> +
> +		page_cache_release(page);
> +	}
> +
> +	wmb();
> +	i915->gtt.base.clear_range(&i915->gtt.base,
> +				   node.start, node.size,
> +				   true);
> +	drm_mm_remove_node(&node);
> +
> +swap_pages:
> +	stolen_pages = obj->pages;
> +	obj->pages = NULL;
> +
> +	obj->base.filp = file;
> +	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
> +	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
> +
> +	/* Recreate any pinned binding with pointers to the new storage */
> +	if (!list_empty(&obj->vma_list)) {
> +		ret = i915_gem_object_get_pages_gtt(obj);
> +		if (ret) {
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		ret = i915_gem_gtt_prepare_object(obj);
> +		if (ret) {
> +			i915_gem_object_put_pages_gtt(obj);
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		ret = i915_gem_object_set_to_gtt_domain(obj, true);
> +		if (ret) {
> +			i915_gem_gtt_finish_object(obj);
> +			i915_gem_object_put_pages_gtt(obj);
> +			obj->pages = stolen_pages;
> +			goto err_file;
> +		}
> +
> +		obj->get_page.sg = obj->pages->sgl;
> +		obj->get_page.last = 0;
> +
> +		list_for_each_entry(vma, &obj->vma_list, vma_link) {
> +			if (!drm_mm_node_allocated(&vma->node))
> +				continue;
> +
> +			WARN_ON(i915_vma_bind(vma,
> +					      obj->cache_level,
> +					      PIN_UPDATE));
> +		}
> +	} else
> +		list_del(&obj->global_list);
> +
> +	/* drop the stolen pin and backing */
> +	shmemfs_pages = obj->pages;
> +	obj->pages = stolen_pages;
> +
> +	i915_gem_object_unpin_pages(obj);
> +	obj->ops->put_pages(obj);
> +	if (obj->ops->release)
> +		obj->ops->release(obj);
> +
> +	obj->ops = &i915_gem_object_ops;
> +	obj->pages = shmemfs_pages;
> +
> +	return 0;
> +
> +err_node:
> +	wmb();
> +	i915->gtt.base.clear_range(&i915->gtt.base,
> +				   node.start, node.size,
> +				   true);
> +	drm_mm_remove_node(&node);
> +err_file:
> +	fput(file);
> +	obj->base.filp = NULL;
> +	return ret;
> +}
> +
> +int
> +i915_gem_freeze(struct drm_device *dev)
> +{
> +	/* Called before i915_gem_suspend() when hibernating */
> +	struct drm_i915_private *i915 = to_i915(dev);
> +	struct drm_i915_gem_object *obj, *tmp;
> +	struct list_head *phase[] = {
> +		&i915->mm.unbound_list, &i915->mm.bound_list, NULL
> +	}, **p;
> +
> +	/* Across hibernation, the stolen area is not preserved.
> +	 * Anything inside stolen must be copied back to normal
> +	 * memory if we wish to preserve it.
> +	 */
> +	for (p = phase; *p; p++) {

Didn't we introduce a list of stolen objects in one of the other
patches?

> +		struct list_head migrate;
> +		int ret;
> +
> +		INIT_LIST_HEAD(&migrate);
> +		list_for_each_entry_safe(obj, tmp, *p, global_list) {
> +			if (obj->stolen == NULL)
> +				continue;
> +
> +			if (obj->internal_volatile)
> +				continue;
> +
> +			/* In the general case, this object may only be alive
> +			 * due to an active reference, and that may disappear
> +			 * when we unbind any of the objects (and so wait upon
> +			 * the GPU and retire requests). To prevent one of the
> +			 * objects from disappearing beneath us, we need to
> +			 * take a reference to each as we build the migration
> +			 * list.
> +			 *
> +			 * This is similar to the strategy required whilst
> +			 * shrinking or evicting objects (for the same reason).
> +			 */
> +			drm_gem_object_reference(&obj->base);
> +			list_move(&obj->global_list, &migrate);
> +		}
> +
> +		ret = 0;
> +		list_for_each_entry_safe(obj, tmp, &migrate, global_list) {
> +			if (ret == 0)
> +				ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
> +			drm_gem_object_unreference(&obj->base);
> +		}
> +		list_splice(&migrate, *p);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return 0;
> +}
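For reference, a minimal sketch of the second alternative above: pinning the whole object into the mappable aperture instead of reserving a temporary drm_mm node. It reuses the locals of the migrate function, assumes the i915_gem_obj_ggtt_pin()/i915_gem_obj_ggtt_offset()/i915_gem_object_ggtt_unpin() helpers of this era, and elides the obj->pages pinning and extra-vma cleanup that the review warns about:

	/* Sketch only: pin the entire stolen object into the mappable GGTT
	 * and copy it out page by page through the CPU window.  Unwind of
	 * partially-read shmemfs pages is elided.
	 */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		return ret;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void __iomem *src;
		void *dst;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}

		/* Read back through the aperture at the object's GGTT offset */
		src = io_mapping_map_atomic_wc(i915->gtt.mappable,
					       i915_gem_obj_ggtt_offset(obj) + PAGE_SIZE * i);
		dst = kmap_atomic(page);
		memcpy_fromio(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		io_mapping_unmap_atomic(src);

		page_cache_release(page);
	}

	i915_gem_object_ggtt_unpin(obj);

The trade-off is that the whole object must fit in, and temporarily consume, mappable GGTT space for the duration of the copy, which a per-page insert avoids.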
On Thu, 2015-10-08 at 12:02 +0100, Chris Wilson wrote:
> On Thu, Oct 08, 2015 at 11:54:29AM +0530, ankitprasad.r.sharma@intel.com wrote:
[snip]
> > +	/* Across hibernation, the stolen area is not preserved.
> > +	 * Anything inside stolen must be copied back to normal
> > +	 * memory if we wish to preserve it.
> > +	 */
> > +	for (p = phase; *p; p++) {
>
> Didn't we introduce a list of stolen objects in one of the other
> patches?

Yes, but that list is only for purgeable objects.

+	/**
+	 * List of stolen objects that have been marked as purgeable and
+	 * thus available for reaping if we need more space for a new
+	 * allocation. Ordered by time of marking purgeable.
+	 */
+	struct list_head stolen_list;
+

Thanks,
Ankit
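If that list were widened to track every stolen-backed object rather than only purgeable ones, i915_gem_freeze() could walk it directly instead of filtering both mm.unbound_list and mm.bound_list. A hypothetical sketch, where mm.stolen_list holds all stolen objects and stolen_link is an assumed per-object list head (neither exists in this form in the posted series):

	/* Hypothetical: build the migration list from a dedicated list of
	 * stolen-backed objects, taking a reference to each as before so
	 * that unbinding (and the implied GPU waits) cannot free them.
	 */
	INIT_LIST_HEAD(&migrate);
	list_for_each_entry_safe(obj, tmp, &i915->mm.stolen_list, stolen_link) {
		if (obj->internal_volatile)
			continue;

		drm_gem_object_reference(&obj->base);
		list_move(&obj->stolen_link, &migrate);
	}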
On Thu, 2015-10-08 at 12:02 +0100, Chris Wilson wrote:
> On Thu, Oct 08, 2015 at 11:54:29AM +0530, ankitprasad.r.sharma@intel.com wrote:
> > +	/* stolen objects are already pinned to prevent shrinkage */
> > +	memset(&node, 0, sizeof(node));
> > +	ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
> > +						  &node,
> > +						  4096, 0, I915_CACHE_NONE,
> > +						  0, i915->gtt.mappable_end,
> > +						  DRM_MM_SEARCH_DEFAULT,
> > +						  DRM_MM_CREATE_DEFAULT);
> > +	if (ret)
> > +		return ret;
> > +
> > +	i915->gtt.base.insert_entries(&i915->gtt.base, obj->pages,
> > +				      node.start, I915_CACHE_NONE, 0);
>
> This was written using an insert_page() function you don't have. Either
> grab that as well, or you need to pin the entire object into the GGTT,
> i.e. i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); note that to do so
> you will also need to be very careful to handle the pinning of
> obj->pages and the introduction of a new GGTT vma.

We considered implementing the second alternative, but as you mentioned,
handling the pinning of obj->pages and the introduction of a new GGTT
vma is a bit messy. Can you please share the insert_page() function?

Thanks,
Ankit
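The insert_page() function itself does not appear in this thread. Mirroring the existing insert_entries() vfunc on struct i915_address_space, one plausible shape for it is the sketch below; the signature is a guess, not the actual code Chris refers to:

	/* Sketch: a per-page counterpart to insert_entries(), binding a
	 * single DMA address at the given offset within the address space.
	 */
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);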
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e6d7a69..7663fb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -968,6 +968,21 @@ static int i915_pm_suspend(struct device *dev)
 	return i915_drm_suspend(drm_dev);
 }
 
+static int i915_pm_freeze(struct device *dev)
+{
+	int ret;
+
+	ret = i915_gem_freeze(pci_get_drvdata(to_pci_dev(dev)));
+	if (ret)
+		return ret;
+
+	ret = i915_pm_suspend(dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int i915_pm_suspend_late(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
@@ -1621,7 +1636,7 @@ static const struct dev_pm_ops i915_pm_ops = {
 	 * @restore, @restore_early : called after rebooting and restoring the
 	 * hibernation image [PMSG_RESTORE]
 	 */
-	.freeze = i915_pm_suspend,
+	.freeze = i915_pm_freeze,
 	.freeze_late = i915_pm_suspend_late,
 	.thaw_early = i915_pm_resume_early,
 	.thaw = i915_pm_resume,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5612df3..1efa3b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2094,6 +2094,12 @@ struct drm_i915_gem_object {
 	 * Advice: are the backing pages purgeable?
 	 */
 	unsigned int madv:2;
+	/**
+	 * Whereas madv is for userspace, there are certain situations
+	 * where we want I915_MADV_DONTNEED behaviour on internal objects
+	 * without conflating the userspace setting.
+	 */
+	unsigned int internal_volatile:1;
 
 	/**
 	 * Current tiling mode for the object.
@@ -2981,6 +2987,7 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_freeze(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_i915_gem_object *batch_obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2c94e22..843f3d1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4365,12 +4365,27 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.put_pages = i915_gem_object_put_pages_gtt,
 };
 
+static struct address_space *
+i915_gem_set_inode_gfp(struct drm_device *dev, struct file *file)
+{
+	struct address_space *mapping = file_inode(file)->i_mapping;
+	gfp_t mask;
+
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+	mapping_set_gfp_mask(mapping, mask);
+
+	return mapping;
+}
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
 	struct drm_i915_gem_object *obj;
-	struct address_space *mapping;
-	gfp_t mask;
 	int ret = 0;
 
 	obj = i915_gem_object_alloc(dev);
@@ -4382,15 +4397,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}
 
-	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
-	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
-		/* 965gm cannot relocate objects above 4GiB. */
-		mask &= ~__GFP_HIGHMEM;
-		mask |= __GFP_DMA32;
-	}
-
-	mapping = file_inode(obj->base.filp)->i_mapping;
-	mapping_set_gfp_mask(mapping, mask);
+	i915_gem_set_inode_gfp(dev, obj->base.filp);
 
 	i915_gem_object_init(obj, &i915_gem_object_ops);
 
@@ -4567,6 +4574,207 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 			dev_priv->gt.stop_ring(ring);
 }
 
+static int
+i915_gem_object_migrate_stolen_to_shmemfs(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_vma *vma, *vn;
+	struct drm_mm_node node;
+	struct file *file;
+	struct address_space *mapping;
+	struct sg_table *stolen_pages, *shmemfs_pages;
+	int ret, i;
+
+	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+		return -EINVAL;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (ret)
+		return ret;
+
+	file = shmem_file_setup("drm mm object", obj->base.size, VM_NORESERVE);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+	mapping = i915_gem_set_inode_gfp(obj->base.dev, file);
+
+	list_for_each_entry_safe(vma, vn, &obj->vma_list, vma_link)
+		if (i915_vma_unbind(vma))
+			continue;
+
+	if (obj->madv != I915_MADV_WILLNEED && list_empty(&obj->vma_list)) {
+		/* Discard the stolen reservation, and replace with
+		 * an unpopulated shmemfs object.
+		 */
+		obj->madv = __I915_MADV_PURGED;
+		goto swap_pages;
+	}
+
+	/* stolen objects are already pinned to prevent shrinkage */
+	memset(&node, 0, sizeof(node));
+	ret = drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm,
+						  &node,
+						  4096, 0, I915_CACHE_NONE,
+						  0, i915->gtt.mappable_end,
+						  DRM_MM_SEARCH_DEFAULT,
+						  DRM_MM_CREATE_DEFAULT);
+	if (ret)
+		return ret;
+
+	i915->gtt.base.insert_entries(&i915->gtt.base, obj->pages,
+				      node.start, I915_CACHE_NONE, 0);
+
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		void __iomem *src;
+		void *dst;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto err_node;
+		}
+
+		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start + PAGE_SIZE * i);
+		dst = kmap_atomic(page);
+		memcpy_fromio(dst, src, PAGE_SIZE);
+		kunmap_atomic(dst);
+		io_mapping_unmap_atomic(src);
+
+		page_cache_release(page);
+	}
+
+	wmb();
+	i915->gtt.base.clear_range(&i915->gtt.base,
+				   node.start, node.size,
+				   true);
+	drm_mm_remove_node(&node);
+
+swap_pages:
+	stolen_pages = obj->pages;
+	obj->pages = NULL;
+
+	obj->base.filp = file;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+	/* Recreate any pinned binding with pointers to the new storage */
+	if (!list_empty(&obj->vma_list)) {
+		ret = i915_gem_object_get_pages_gtt(obj);
+		if (ret) {
+			obj->pages = stolen_pages;
+			goto err_file;
+		}
+
+		ret = i915_gem_gtt_prepare_object(obj);
+		if (ret) {
+			i915_gem_object_put_pages_gtt(obj);
+			obj->pages = stolen_pages;
+			goto err_file;
+		}
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret) {
+			i915_gem_gtt_finish_object(obj);
+			i915_gem_object_put_pages_gtt(obj);
+			obj->pages = stolen_pages;
+			goto err_file;
+		}
+
+		obj->get_page.sg = obj->pages->sgl;
+		obj->get_page.last = 0;
+
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			if (!drm_mm_node_allocated(&vma->node))
+				continue;
+
+			WARN_ON(i915_vma_bind(vma,
+					      obj->cache_level,
+					      PIN_UPDATE));
+		}
+	} else
+		list_del(&obj->global_list);
+
+	/* drop the stolen pin and backing */
+	shmemfs_pages = obj->pages;
+	obj->pages = stolen_pages;
+
+	i915_gem_object_unpin_pages(obj);
+	obj->ops->put_pages(obj);
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	obj->ops = &i915_gem_object_ops;
+	obj->pages = shmemfs_pages;
+
+	return 0;
+
+err_node:
+	wmb();
+	i915->gtt.base.clear_range(&i915->gtt.base,
+				   node.start, node.size,
+				   true);
+	drm_mm_remove_node(&node);
+err_file:
+	fput(file);
+	obj->base.filp = NULL;
+	return ret;
+}
+
+int
+i915_gem_freeze(struct drm_device *dev)
+{
+	/* Called before i915_gem_suspend() when hibernating */
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_object *obj, *tmp;
+	struct list_head *phase[] = {
+		&i915->mm.unbound_list, &i915->mm.bound_list, NULL
+	}, **p;
+
+	/* Across hibernation, the stolen area is not preserved.
+	 * Anything inside stolen must be copied back to normal
+	 * memory if we wish to preserve it.
+	 */
+	for (p = phase; *p; p++) {
+		struct list_head migrate;
+		int ret;
+
+		INIT_LIST_HEAD(&migrate);
+		list_for_each_entry_safe(obj, tmp, *p, global_list) {
+			if (obj->stolen == NULL)
+				continue;
+
+			if (obj->internal_volatile)
+				continue;
+
+			/* In the general case, this object may only be alive
+			 * due to an active reference, and that may disappear
+			 * when we unbind any of the objects (and so wait upon
+			 * the GPU and retire requests). To prevent one of the
+			 * objects from disappearing beneath us, we need to
+			 * take a reference to each as we build the migration
+			 * list.
+			 *
+			 * This is similar to the strategy required whilst
+			 * shrinking or evicting objects (for the same reason).
+			 */
+			drm_gem_object_reference(&obj->base);
+			list_move(&obj->global_list, &migrate);
+		}
+
+		ret = 0;
+		list_for_each_entry_safe(obj, tmp, &migrate, global_list) {
+			if (ret == 0)
+				ret = i915_gem_object_migrate_stolen_to_shmemfs(obj);
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&migrate, *p);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 int
 i915_gem_suspend(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d08989a..6791c18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2517,6 +2517,9 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	if (IS_ERR(obj))
 		return false;
 
+	/* Not to be preserved across hibernation */
+	obj->internal_volatile = true;
+
 	obj->tiling_mode = plane_config->tiling;
 	if (obj->tiling_mode == I915_TILING_X)
 		obj->stride = fb->pitches[0];
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 67de958..dd2ce4d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -148,6 +148,12 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 		goto out;
 	}
 
+	/* Discard the contents of the BIOS fb across hibernation.
+	 * We really want to completely throw away the earlier fbdev
+	 * and reconfigure it anyway.
+	 */
+	obj->internal_volatile = true;
+
 	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
 	if (IS_ERR(fb)) {
 		ret = PTR_ERR(fb);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 12bf162..eec1131 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5396,6 +5396,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 	I915_WRITE(VLV_PCBR, pctx_paddr);
 
 out:
+	/* The power context need not be preserved across hibernation */
+	pctx->internal_volatile = true;
 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a928602..cd19776 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2025,6 +2025,12 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	/* Ringbuffer objects are by definition volatile - only the commands
+	 * between HEAD and TAIL need to be preserved and whilst there are
+	 * any commands there, the ringbuffer is pinned by activity.
+	 */
+	obj->internal_volatile = true;
+
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
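With a per-page hook of the shape guessed earlier, the copy loop in i915_gem_object_migrate_stolen_to_shmemfs() above could rebind the single reserved GGTT page once per stolen page rather than inserting the whole object up front, so only 4096 bytes of aperture are ever consumed. A sketch, assuming that insert_page() signature and an i915_gem_object_get_dma_address(obj, i) helper returning the DMA address of page i (both assumptions, not part of this patch):

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void __iomem *src;
		void *dst;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto err_node;
		}

		/* Point the one reserved GGTT page at the next stolen page */
		i915->gtt.base.insert_page(&i915->gtt.base,
					   i915_gem_object_get_dma_address(obj, i),
					   node.start, I915_CACHE_NONE, 0);
		wmb();

		src = io_mapping_map_atomic_wc(i915->gtt.mappable, node.start);
		dst = kmap_atomic(page);
		memcpy_fromio(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		io_mapping_unmap_atomic(src);

		page_cache_release(page);
	}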