@@ -1888,18 +1888,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-static int
-__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
-{
- return __i915_request_await_exclusive(rq, &vma->active);
-}
-
static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
/* Wait for the vma to be bound before we start! */
- err = __i915_request_await_bind(rq, vma);
+ err = i915_request_await_bind(rq, vma);
if (err)
return err;
@@ -54,6 +54,22 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
+/**
+ * i915_request_await_bind() - Set up a request to wait for a vma bind to complete
+ * @rq: the request which should wait
+ * @vma: vma whose binding @rq should wait to complete
+ *
+ * Set up the request @rq to asynchronously wait for the binding of @vma to
+ * complete before @rq begins execution.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static inline int
+i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
+{
+ return __i915_request_await_exclusive(rq, &vma->active);
+}
+
int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
struct dma_fence *fence,
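
For reference, a minimal usage sketch (not part of this patch): a caller that
needs its request to run only after the vma has been bound would await the
bind before tracking the request on the vma, mirroring the existing
__i915_vma_move_to_active() above. The helper name example_move_to_active()
is purely illustrative; i915_active_add_request() is the existing i915 helper
used here only to show the tracking step that typically follows the await.

static int example_move_to_active(struct i915_vma *vma,
				  struct i915_request *rq)
{
	int err;

	/* Order @rq after any pending asynchronous bind of @vma. */
	err = i915_request_await_bind(rq, vma);
	if (err)
		return err;

	/* Track @rq on the vma so it is not unbound while the request runs. */
	return i915_active_add_request(&vma->active, rq);
}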