@@ -25,6 +25,45 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
+/**
+ * i915_vm_sync() - Wait until address space is not in use
+ * @vm: address space
+ *
+ * Waits until all requests using the address space are complete.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int i915_vm_sync(struct i915_address_space *vm)
+{
+	long ret;
+
+	/* Wait for all requests under this vm to finish */
+	ret = dma_resv_wait_timeout(vm->root_obj->base.resv,
+				    DMA_RESV_USAGE_BOOKKEEP, false,
+				    MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+	else if (ret > 0)
+		return 0;
+	else
+		return -ETIMEDOUT;
+}
+
+/**
+ * i915_vm_is_active() - Check if address space is being used
+ * @vm: address space
+ *
+ * Check if any request using the specified address space is
+ * active.
+ *
+ * Returns: true if address space is active, false otherwise.
+ */
+bool i915_vm_is_active(const struct i915_address_space *vm)
+{
+	return !dma_resv_test_signaled(vm->root_obj->base.resv,
+				       DMA_RESV_USAGE_BOOKKEEP);
+}
+
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
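/*
 * Illustrative usage sketch, not part of this patch: how a caller might
 * combine the two helpers added above. i915_vm_is_active() is an unlocked
 * snapshot and can only serve as a hint; i915_vm_sync() is the
 * authoritative wait. example_vm_quiesce() is an invented name.
 */
static int example_vm_quiesce(struct i915_address_space *vm)
{
	/* Cheap early-out when no request is using the vm right now */
	if (!i915_vm_is_active(vm))
		return 0;

	/* Otherwise block until all requests under this vm complete */
	return i915_vm_sync(vm);
}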
@@ -51,4 +51,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_MASK		I915_GTT_PAGE_MASK
+int i915_vm_sync(struct i915_address_space *vm);
+bool i915_vm_is_active(const struct i915_address_space *vm);
+
#endif
@@ -420,6 +420,24 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
	return err;
}
+/**
+ * i915_vma_sync() - Wait for the vma to be idle
+ * @vma: vma to wait for
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int i915_vma_sync(struct i915_vma *vma)
+{
+	int ret;
+
+	/* Wait for the asynchronous bindings and pending GPU reads */
+	ret = i915_active_wait(&vma->active);
+	if (ret || !i915_vma_is_persistent(vma) || i915_vma_is_purged(vma))
+		return ret;
+
+	return i915_vm_sync(vma->vm);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
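/*
 * Illustrative usage sketch, not part of this patch: a hypothetical
 * synchronous unbind built on the reworked i915_vma_sync(). For a
 * persistent (VM_BIND) vma the sync also waits on the vm's reservation
 * object, since such vmas are no longer tracked in vma->active.
 * example_vma_unbind_sync() is an invented name; locking is elided.
 */
static int example_vma_unbind_sync(struct i915_vma *vma)
{
	int err;

	/* Waits for bindings, GPU reads and, if persistent, the whole vm */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	return i915_vma_unbind(vma);
}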
@@ -1882,6 +1900,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
	int err;

	assert_object_held(obj);

+	/* Activity of persistent vmas is tracked on the vm's resv object */
+	if (i915_vma_is_persistent(vma))
+		return -EINVAL;
+
	GEM_BUG_ON(!vma->pages);
@@ -2091,6 +2111,14 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
		return ERR_PTR(-EBUSY);
	}

+	/*
+	 * For a persistent vma, also block the eviction fence on all
+	 * activity tracked on the vm's reservation object, so the unbind
+	 * cannot complete while requests are still using the vm.
+	 */
+	if (i915_vma_is_persistent(vma) &&
+	    __i915_sw_fence_await_reservation(&vma->resource->chain,
+					      vma->vm->root_obj->base.resv,
+					      DMA_RESV_USAGE_BOOKKEEP,
+					      i915_fence_timeout(vma->vm->i915),
+					      GFP_NOWAIT | __GFP_NOWARN) < 0)
+		return ERR_PTR(-EBUSY);
+
	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
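/*
 * Illustrative sketch, not part of this patch: the ordering that the
 * persistent-vma branch above establishes. The fence returned by
 * __i915_vma_unbind_async() can now signal only after every
 * DMA_RESV_USAGE_BOOKKEEP fence on the vm has signaled.
 * example_unbind_and_wait() is an invented name and would have to live
 * in i915_vma.c, since __i915_vma_unbind_async() is static there.
 */
static int example_unbind_and_wait(struct i915_vma *vma)
{
	struct dma_fence *fence;
	long ret;

	fence = __i915_vma_unbind_async(vma);
	if (IS_ERR_OR_NULL(fence))
		return PTR_ERR_OR_ZERO(fence);

	/* For a persistent vma this implies the vm itself is idle */
	ret = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return ret < 0 ? ret : 0;
}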
@@ -51,12 +51,6 @@ i915_vma_create_persistent(struct drm_i915_gem_object *obj,
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
-	return !i915_active_is_idle(&vma->active);
-}
-
/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
@@ -162,6 +156,18 @@ static inline void i915_vma_set_purged(struct i915_vma *vma)
	set_bit(I915_VMA_PURGED_BIT, __i915_vma_flags(vma));
}
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+	if (i915_vma_is_persistent(vma)) {
+		if (i915_vma_is_purged(vma))
+			return false;
+
+		return i915_vm_is_active(vma->vm);
+	}
+
+	return !i915_active_is_idle(&vma->active);
+}
+
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
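/*
 * Illustrative sketch, not part of this patch: with the rework above,
 * activity checks on a persistent vma consult the vm's reservation
 * object (unless the vma has been purged), so a hypothetical eviction
 * scan can keep using i915_vma_is_active() unchanged.
 * example_vma_is_evictable() is an invented name.
 */
static inline bool example_vma_is_evictable(const struct i915_vma *vma)
{
	/* Unlocked snapshot: skip vmas that still have work in flight */
	return !i915_vma_is_active(vma);
}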
@@ -433,12 +439,7 @@ void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
int i915_vma_wait_for_bind(struct i915_vma *vma);
-
-static inline int i915_vma_sync(struct i915_vma *vma)
-{
-	/* Wait for the asynchronous bindings and pending GPU reads */
-	return i915_active_wait(&vma->active);
-}
+int i915_vma_sync(struct i915_vma *vma);
/**
* i915_vma_get_current_resource - Get the current resource of the vma