@@ -1852,13 +1852,13 @@ void __i915_request_queue(struct i915_request *rq,
local_bh_enable(); /* kick tasklets */
}
-void i915_request_add(struct i915_request *rq)
+void i915_request_add_locked(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
struct i915_gem_context *ctx;
- lockdep_assert_held(&tl->mutex);
+ intel_context_assert_timeline_is_locked(tl);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
@@ -1873,7 +1873,15 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
- mutex_unlock(&tl->mutex);
+}
+
+void i915_request_add(struct i915_request *rq)
+{
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ i915_request_add_locked(rq);
+
+ intel_context_timeline_unlock(tl);
}
static unsigned long local_clock_ns(unsigned int *cpu)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -425,6 +425,7 @@ int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *dep
int i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence);
+void i915_request_add_locked(struct i915_request *rq);
void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
i915_request_add() assumes that the timeline is locked when the function is
called and releases the lock before returning. The next commit has one case
where releasing the timeline mutex is not desired. Add a new
i915_request_add_locked() variant that queues the request without releasing
the lock; i915_request_add() becomes a wrapper around it that drops the lock
afterwards.

Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_request.c | 14 +++++++++++---
 drivers/gpu/drm/i915/i915_request.h |  1 +
 2 files changed, 12 insertions(+), 3 deletions(-)
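For illustration only, not part of this patch: a hypothetical caller that
needs to keep holding the timeline mutex after queuing the request would use
the new _locked variant and release the lock itself. The function name below
is made up for the example; the sketch assumes the request was obtained with
the timeline mutex already held (e.g. via i915_request_create()).

	/* Hypothetical caller: keep tl->mutex held across the add. */
	static void example_add_and_keep_lock(struct i915_request *rq)
	{
		struct intel_timeline *tl = i915_request_timeline(rq);

		i915_request_add_locked(rq); /* queue rq, tl->mutex stays held */

		/* ... further work that still requires tl->mutex ... */

		intel_context_timeline_unlock(tl); /* caller drops the lock */
	}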