diff mbox

[v5,11/35] drm/i915: Added scheduler hook into i915_gem_request_notify()

Message ID 1455805644-6450-12-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

John Harrison Feb. 18, 2016, 2:26 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The scheduler needs to know when requests have completed so that it
can keep its own internal state up to date and can submit new requests
to the hardware from its queue.

v2: Updated due to changes in request handling. The operation is now
reversed from before. Rather than the scheduler being in control of
completion events, it is now the request code itself. The scheduler
merely receives a notification event. It can then optionally request
its worker thread be woken up after all completion processing is
complete.

v4: Downgraded a BUG_ON to a WARN_ON as the latter is preferred.

v5: Squashed the i915_scheduler.c portions down into the 'start of
scheduler' patch. [Joonas Lahtinen]

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

Comments

Joonas Lahtinen March 1, 2016, 9:10 a.m. UTC | #1
On to, 2016-02-18 at 14:26 +0000, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
> 
> The scheduler needs to know when requests have completed so that it
> can keep its own internal state up to date and can submit new requests
> to the hardware from its queue.
> 
> v2: Updated due to changes in request handling. The operation is now
> reversed from before. Rather than the scheduler being in control of
> completion events, it is now the request code itself. The scheduler
> merely receives a notification event. It can then optionally request
> it's worker thread be woken up after all completion processing is
> complete.
> 
> v4: Downgraded a BUG_ON to a WARN_ON as the latter is preferred.
> 
> v5: Squashed the i915_scheduler.c portions down into the 'start of
> scheduler' patch. [Joonas Lahtinen]
> 
> For: VIZ-1587
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++++++++++
>  1 file changed, 16 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 0003cfc..c3b7def 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2872,6 +2872,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
>  {
>  	struct drm_i915_gem_request *req, *req_next;
>  	unsigned long flags;
> +	bool wake_sched = false;
>  	u32 seqno;
>  
>  	if (list_empty(&ring->fence_signal_list)) {
> @@ -2908,6 +2909,14 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
>  		 */
>  		list_del_init(&req->signal_link);
>  
> +		/*
> +		 * NB: Must notify the scheduler before signalling
> +		 * the node. Otherwise the node can get retired first
> +		 * and call scheduler_clean() while the scheduler
> +		 * thinks it is still active.
> +		 */
> +		wake_sched |= i915_scheduler_notify_request(req);

		if (i915_scheduler_notify_request(req))
			wake_sched = true;

> +
>  		if (!req->cancelled) {
>  			fence_signal_locked(&req->fence);
>  			trace_i915_gem_request_complete(req);
> @@ -2924,6 +2933,13 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
>  
>  	if (!fence_locked)
>  		spin_unlock_irqrestore(&ring->fence_lock, flags);
> +
> +	/* Necessary? Or does the fence_signal() call do an implicit wakeup? */
> +	wake_up_all(&ring->irq_queue);

This should probably be figured out after the struct fence series is
merged.

Regards, Joonas
> +
> +	/* Final scheduler processing after all individual updates are done. */
> +	if (wake_sched)
> +		i915_scheduler_wakeup(ring->dev);
>  }
>  
>  static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0003cfc..c3b7def 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2872,6 +2872,7 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 {
 	struct drm_i915_gem_request *req, *req_next;
 	unsigned long flags;
+	bool wake_sched = false;
 	u32 seqno;
 
 	if (list_empty(&ring->fence_signal_list)) {
@@ -2908,6 +2909,14 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		 */
 		list_del_init(&req->signal_link);
 
+		/*
+		 * NB: Must notify the scheduler before signalling
+		 * the node. Otherwise the node can get retired first
+		 * and call scheduler_clean() while the scheduler
+		 * thinks it is still active.
+		 */
+		wake_sched |= i915_scheduler_notify_request(req);
+
 		if (!req->cancelled) {
 			fence_signal_locked(&req->fence);
 			trace_i915_gem_request_complete(req);
@@ -2924,6 +2933,13 @@  void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 
 	if (!fence_locked)
 		spin_unlock_irqrestore(&ring->fence_lock, flags);
+
+	/* Necessary? Or does the fence_signal() call do an implicit wakeup? */
+	wake_up_all(&ring->irq_queue);
+
+	/* Final scheduler processing after all individual updates are done. */
+	if (wake_sched)
+		i915_scheduler_wakeup(ring->dev);
 }
 
 static const char *i915_gem_request_get_driver_name(struct fence *req_fence)