We don't wish to refault the entire object (other vma) when unbinding
one partial vma. To do this, track which vma have been faulted into the
user's address space.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c           | 31 ++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_evict.c     |  2 +-
 drivers/gpu/drm/i915/i915_gem_fence_reg.c |  2 +-
 drivers/gpu/drm/i915/i915_vma.c           |  3 ++-
 drivers/gpu/drm/i915/i915_vma.h           |  6 ++++++
 5 files changed, 32 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -1972,6 +1972,8 @@ int i915_gem_fault(struct vm_fault *vmf)
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
+ if (ret == 0)
+ vma->flags |= I915_VMA_USERFAULT;
err_unpin:
__i915_vma_unpin(vma);
@@ -2024,6 +2026,22 @@ int i915_gem_fault(struct vm_fault *vmf)
return ret;
}
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ list_del_init(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ vma->flags &= ~I915_VMA_USERFAULT;
+ }
+}
+
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -2057,9 +2075,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (list_empty(&obj->userfault_link))
goto out;
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
+ __i915_gem_object_release_mmap(obj);
/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
@@ -2087,11 +2103,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link) {
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
- }
+ &dev_priv->mm.userfault_list, userfault_link)
+ __i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -2114,7 +2127,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
if (!reg->vma)
continue;
- GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
return false;
list_add(&vma->evict_link, unwind);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -405,7 +405,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
@@ -705,7 +705,8 @@ int i915_vma_unbind(struct i915_vma *vma)
return ret;
/* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
+ if (i915_vma_has_userfault(vma))
+ i915_gem_release_mmap(obj);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
@@ -81,6 +81,7 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10)
+#define I915_VMA_USERFAULT BIT(11)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -143,6 +144,11 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return vma->flags & I915_VMA_CLOSED;
}
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_USERFAULT;
+}
+
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
return vma->active;
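
For illustration only, here is a minimal, self-contained userspace
sketch of the flag-tracking pattern this patch uses. Every name below
(fake_vma, FAKE_VMA_USERFAULT, and so on) is a made-up stand-in, not
i915 code: the point is simply that a per-vma flag, set on fault and
cleared on revoke, lets an unbind skip the expensive PTE/TLB revocation
for any vma the user never faulted in.

/*
 * Minimal userspace sketch of per-vma fault tracking. All names are
 * illustrative stand-ins, not i915 code.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_VMA_USERFAULT (1u << 11)	/* mirrors the new flag bit */

struct fake_vma {
	unsigned int flags;
};

/* On a successful fault, remember this vma is in the user's PTEs. */
static void fake_vma_fault(struct fake_vma *vma)
{
	vma->flags |= FAKE_VMA_USERFAULT;
}

/* When the CPU mapping is revoked, the flag is cleared again. */
static void fake_vma_release_mmap(struct fake_vma *vma)
{
	vma->flags &= ~FAKE_VMA_USERFAULT;
}

static bool fake_vma_has_userfault(const struct fake_vma *vma)
{
	return vma->flags & FAKE_VMA_USERFAULT;
}

int main(void)
{
	struct fake_vma vma = { .flags = 0 };

	/* Never faulted: an unbind can skip revoking user mappings. */
	printf("revoke on unbind? %d\n", fake_vma_has_userfault(&vma));

	fake_vma_fault(&vma);
	printf("revoke on unbind? %d\n", fake_vma_has_userfault(&vma));

	fake_vma_release_mmap(&vma);
	printf("revoke on unbind? %d\n", fake_vma_has_userfault(&vma));
	return 0;
}

This is why i915_vma_unbind() above only calls i915_gem_release_mmap()
when i915_vma_has_userfault(vma) is true: other vma of the same object
no longer pay for one partial vma's unbind.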