@@ -86,6 +86,12 @@ static void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
{
lockdep_assert_held(&vma->vm->vm_bind_lock);
+ spin_lock(&vma->vm->vm_rebind_lock);
+ if (!list_empty(&vma->vm_rebind_link))
+ list_del_init(&vma->vm_rebind_link);
+ i915_vma_set_purged(vma);
+ spin_unlock(&vma->vm->vm_rebind_lock);
+
list_del_init(&vma->vm_bind_link);
list_del_init(&vma->non_priv_vm_bind_link);
i915_vm_bind_it_remove(vma, &vma->vm->va);
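The lockdep_assert_held() above makes explicit that callers already hold vm_bind_lock; only the new vm_rebind_lock is taken inside this function. Purely as an illustrative caller sketch (the function name and exact flow are assumptions, not part of this patch):

static void example_vm_unbind_locked(struct i915_vma *vma)
{
        struct i915_address_space *vm = vma->vm;

        /* vm_bind_lock serializes bind/unbind bookkeeping for this vm */
        mutex_lock(&vm->vm_bind_lock);
        i915_gem_vm_bind_remove(vma, false);
        mutex_unlock(&vm->vm_bind_lock);

        /* The actual unbind/teardown of the vma happens outside vm_bind_lock */
}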
@@ -290,6 +290,8 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->vm_bound_list);
mutex_init(&vm->vm_bind_lock);
INIT_LIST_HEAD(&vm->non_priv_vm_bind_list);
+ INIT_LIST_HEAD(&vm->vm_rebind_list);
+ spin_lock_init(&vm->vm_rebind_lock);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
@@ -266,6 +266,10 @@ struct i915_address_space {
struct list_head vm_bind_list;
/** @vm_bound_list: List of vm_binding completed */
struct list_head vm_bound_list;
+ /** @vm_rebind_list: list of vmas to be rebound */
+ struct list_head vm_rebind_list;
+ /** @vm_rebind_lock: protects @vm_rebind_list */
+ spinlock_t vm_rebind_lock;
/** @va: tree of persistent vmas */
struct rb_root_cached va;
/** @non_priv_vm_bind_list: list of non-private object mappings */
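Evicted persistent vmas collect on vm_rebind_list so that a later submission can revalidate and rebind them. A minimal sketch of a consumer (e.g. the execbuf path) draining the list; the helper name, the assumption that the caller holds vm_bind_lock, and the choice of vm_bind_list as the destination are all assumptions, not part of this diff:

static void example_scoop_rebind_vmas(struct i915_address_space *vm)
{
        struct i915_vma *vma, *vn;

        /* Assumption: caller holds vm_bind_lock for the list_move below */
        lockdep_assert_held(&vm->vm_bind_lock);

        /* Only list manipulation under the spinlock; no sleeping work here */
        spin_lock(&vm->vm_rebind_lock);
        list_for_each_entry_safe(vma, vn, &vm->vm_rebind_list, vm_rebind_link) {
                list_del_init(&vma->vm_rebind_link);
                /* Re-queue for revalidation before the next submission */
                if (!list_empty(&vma->vm_bind_link))
                        list_move_tail(&vma->vm_bind_link, &vm->vm_bind_list);
        }
        spin_unlock(&vm->vm_rebind_lock);
}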
@@ -241,6 +241,7 @@ vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->vm_bind_link);
INIT_LIST_HEAD(&vma->non_priv_vm_bind_link);
+ INIT_LIST_HEAD(&vma->vm_rebind_link);
return vma;
err_unlock:
@@ -1681,6 +1682,14 @@ static void force_unbind(struct i915_vma *vma)
if (!drm_mm_node_allocated(&vma->node))
return;
+ /*
+ * Persistent vma should have been purged by now.
+ * If not, issue a warning and purge it.
+ */
+ if (GEM_WARN_ON(i915_vma_is_persistent(vma) &&
+ !i915_vma_is_purged(vma)))
+ i915_vma_set_purged(vma);
+
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -2042,6 +2051,16 @@ int __i915_vma_unbind(struct i915_vma *vma)
__i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+
+ if (i915_vma_is_persistent(vma)) {
+ spin_lock(&vma->vm->vm_rebind_lock);
+ if (list_empty(&vma->vm_rebind_link) &&
+ !i915_vma_is_purged(vma))
+ list_add_tail(&vma->vm_rebind_link,
+ &vma->vm->vm_rebind_list);
+ spin_unlock(&vma->vm->vm_rebind_lock);
+ }
+
return 0;
}
@@ -2054,8 +2073,7 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
if (!drm_mm_node_allocated(&vma->node))
return NULL;
- if (i915_vma_is_pinned(vma) ||
- &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
+ if (i915_vma_is_pinned(vma))
return ERR_PTR(-EAGAIN);
/*
@@ -2077,6 +2095,15 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+ if (i915_vma_is_persistent(vma)) {
+ spin_lock(&vma->vm->vm_rebind_lock);
+ if (list_empty(&vma->vm_rebind_link) &&
+ !i915_vma_is_purged(vma))
+ list_add_tail(&vma->vm_rebind_link,
+ &vma->vm->vm_rebind_list);
+ spin_unlock(&vma->vm->vm_rebind_lock);
+ }
+
return fence;
}
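The rebind-list insertion added to __i915_vma_unbind() and __i915_vma_unbind_async() above is identical in both paths. As an illustrative refactor only (the helper name is hypothetical, not part of this patch), it could be factored out as:

static void i915_vma_add_to_rebind_list(struct i915_vma *vma)
{
        struct i915_address_space *vm = vma->vm;

        if (!i915_vma_is_persistent(vma))
                return;

        spin_lock(&vm->vm_rebind_lock);
        /* Purged vmas are never rebound; skip vmas already on the list */
        if (list_empty(&vma->vm_rebind_link) && !i915_vma_is_purged(vma))
                list_add_tail(&vma->vm_rebind_link, &vm->vm_rebind_list);
        spin_unlock(&vm->vm_rebind_lock);
}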
@@ -152,6 +152,16 @@ static inline void i915_vma_set_persistent(struct i915_vma *vma)
set_bit(I915_VMA_PERSISTENT_BIT, __i915_vma_flags(vma));
}
+static inline bool i915_vma_is_purged(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_PURGED_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_set_purged(struct i915_vma *vma)
+{
+ set_bit(I915_VMA_PURGED_BIT, __i915_vma_flags(vma));
+}
+
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
i915_gem_object_get(vma->obj);
@@ -267,8 +267,14 @@ struct i915_vma {
/**
* I915_VMA_PERSISTENT_BIT:
* The vma is persistent (created with VM_BIND call).
+ *
+ * I915_VMA_PURGED_BIT:
+ * The persistent vma is force unbound, either due to a VM_UNBIND call
+ * from the UMD or because the VM is released. Do not check/wait for VM
+ * activeness in i915_vma_is_active() and i915_vma_sync() calls.
*/
#define I915_VMA_PERSISTENT_BIT 19
+#define I915_VMA_PURGED_BIT 20
struct i915_active active;
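Per the comment above, purged persistent vmas must not be checked or waited on for VM activeness. A rough sketch of how i915_vma_is_active() could honour the new bit; i915_vm_is_active() is an assumed VM-level activity helper and is not part of this diff:

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
        if (i915_vma_is_persistent(vma)) {
                /* A purged persistent vma never reports activity */
                if (i915_vma_is_purged(vma))
                        return false;

                /* Persistent vma activity tracked at the VM level (assumed helper) */
                return i915_vm_is_active(vma->vm);
        }

        return !i915_active_is_idle(&vma->active);
}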
@@ -299,6 +305,8 @@ struct i915_vma {
struct list_head vm_bind_link;
/** @non_priv_vm_bind_link: Link in non-private persistent VMA list */
struct list_head non_priv_vm_bind_link;
+ /** @vm_rebind_link: link in @vm_rebind_list, protected by @vm_rebind_lock */
+ struct list_head vm_rebind_link;
/** Interval tree structures for persistent vma */