
[01/12] drm/i915: Split request submit/execute phase into two

Message ID 20161102175051.29163-2-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Nov. 2, 2016, 5:50 p.m. UTC
In order to support deferred scheduling, we need to differentiate
between when the request is ready to run (i.e. the submit fence is
signaled) and when the request is actually run (a new execute fence).
This is typically split between the request itself wanting to wait upon
others (for which we use the submit fence) and the CPU wanting to wait
upon the request, for which we use the execute fence to be sure the
hardware is ready to signal completion.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 33 ++++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_request.h |  2 ++
 2 files changed, 26 insertions(+), 9 deletions(-)
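
To make the ordering concrete, below is a minimal userspace sketch (plain C with stand-in booleans and hypothetical toy_* names, not the i915_sw_fence API): the execute fence is only committed from within the submit callback, so a signalled execute fence always implies a signalled submit fence, which is the invariant i915_wait_request() now asserts.

/*
 * Toy model, not i915 code: two booleans stand in for the submit and
 * execute fences. Mirroring submit_notify() in the patch, the "execute"
 * flag is only set from within the submit step, so observing execute
 * done always implies submit done.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	bool submit_done;	/* stands in for req->submit */
	bool execute_done;	/* stands in for req->execute */
};

/* Stands in for submit_notify(): runs when the submit fence signals. */
static void toy_submit_notify(struct toy_request *req)
{
	req->submit_done = true;
	/* Stands in for i915_sw_fence_commit(&request->execute). */
	req->execute_done = true;
}

int main(void)
{
	struct toy_request req = { false, false };

	/* Before submission, neither fence has signalled. */
	assert(!req.submit_done && !req.execute_done);

	toy_submit_notify(&req);

	/* Execute done implies submit done. */
	assert(req.execute_done && req.submit_done);
	printf("execute signalled only after submit\n");
	return 0;
}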

Comments

Tvrtko Ursulin Nov. 3, 2016, 10:35 a.m. UTC | #1
On 02/11/2016 17:50, Chris Wilson wrote:
> In order to support deferred scheduling, we need to differentiate
> between when the request is ready to run (i.e. the submit fence is
> signaled) and when the request is actually run (a new execute fence).
> This is typically split between the request itself wanting to wait upon
> others (for which we use the submit fence) and the CPU wanting to wait
> upon the request, for which we use the execute fence to be sure the
> hardware is ready to signal completion.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 79b0046d9a57..1ae5a2f8953f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -351,11 +351,19 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	list_move_tail(&request->link, &timeline->requests);
 	spin_unlock(&request->timeline->lock);
 
+	i915_sw_fence_commit(&request->execute);
+
 	spin_unlock_irqrestore(&timeline->lock, flags);
 
 	return NOTIFY_DONE;
 }
 
+static int __i915_sw_fence_call
+execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	return NOTIFY_DONE;
+}
+
 /**
  * i915_gem_request_alloc - allocate a request structure
  *
@@ -441,6 +449,12 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       __timeline_get_seqno(req->timeline->common));
 
 	i915_sw_fence_init(&req->submit, submit_notify);
+	i915_sw_fence_init(&req->execute, execute_notify);
+	/* Ensure that the execute fence completes after the submit fence -
+	 * as we complete the execute fence from within the submit fence
+	 * callback, its completion would otherwise be visible first.
+	 */
+	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
 
 	INIT_LIST_HEAD(&req->active_list);
 	req->i915 = dev_priv;
@@ -817,9 +831,9 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 }
 
 static long
-__i915_request_wait_for_submit(struct drm_i915_gem_request *request,
-			       unsigned int flags,
-			       long timeout)
+__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
+				unsigned int flags,
+				long timeout)
 {
 	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -831,9 +845,9 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
 		add_wait_queue(q, &reset);
 
 	do {
-		prepare_to_wait(&request->submit.wait, &wait, state);
+		prepare_to_wait(&request->execute.wait, &wait, state);
 
-		if (i915_sw_fence_done(&request->submit))
+		if (i915_sw_fence_done(&request->execute))
 			break;
 
 		if (flags & I915_WAIT_LOCKED &&
@@ -851,7 +865,7 @@ __i915_request_wait_for_submit(struct drm_i915_gem_request *request,
 
 		timeout = io_schedule_timeout(timeout);
 	} while (timeout);
-	finish_wait(&request->submit.wait, &wait);
+	finish_wait(&request->execute.wait, &wait);
 
 	if (flags & I915_WAIT_LOCKED)
 		remove_wait_queue(q, &reset);
@@ -903,13 +917,14 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 
 	trace_i915_gem_request_wait_begin(req);
 
-	if (!i915_sw_fence_done(&req->submit)) {
-		timeout = __i915_request_wait_for_submit(req, flags, timeout);
+	if (!i915_sw_fence_done(&req->execute)) {
+		timeout = __i915_request_wait_for_execute(req, flags, timeout);
 		if (timeout < 0)
 			goto complete;
 
-		GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
+		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
 	}
+	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
 	GEM_BUG_ON(!req->global_seqno);
 
 	/* Optimistic short spin before touching IRQs */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 75f8360b3421..ed13f37fea0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -85,7 +85,9 @@ struct drm_i915_gem_request {
 	struct intel_signal_node signaling;
 
 	struct i915_sw_fence submit;
+	struct i915_sw_fence execute;
 	wait_queue_t submitq;
+	wait_queue_t execq;
 
 	u32 global_seqno;
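
For reference, a toy userspace analogue of the waiter side (pthreads with made-up names, not the kernel wait-queue or i915 API): the waiter blocks on an "execute" flag that the submitter thread only sets after its "submit" flag, mirroring how waiters now sleep on req->execute.wait instead of req->submit.wait.

/*
 * Toy userspace analogue of __i915_request_wait_for_execute(): the
 * waiter sleeps until "execute" is set, and the submitter thread sets
 * it only after "submit". Build with -lpthread. Not i915 code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool submit_done, execute_done;

static void *submitter(void *arg)
{
	pthread_mutex_lock(&lock);
	submit_done = true;	/* the submit fence signals first ...      */
	execute_done = true;	/* ... then execute, as in submit_notify() */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, submitter, NULL);

	/* The CPU waiter now waits on "execute", not "submit". */
	pthread_mutex_lock(&lock);
	while (!execute_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("request executing (submit_done=%d)\n", submit_done);
	pthread_join(thr, NULL);
	return 0;
}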