[RFC,04/11] drm/i915/preempt: Implement null preemption method

Message ID 20170223190833.2888-5-michal.winiarski@intel.com
State New, archived

Commit Message

Michał Winiarski Feb. 23, 2017, 7:08 p.m. UTC
We only request preemption for requests whose priority is high enough
(above a threshold). For now, we also ignore requests that have
dependencies on other engines.
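
Condensed, the gate applied before any preemption attempt looks roughly
like this (a sketch distilled from the patch below; should_preempt() is
an illustrative name, not a function this patch adds):

static bool should_preempt(struct intel_engine_cs *engine, int prio,
			   unsigned long *engines_bumped)
{
	int num_engines_bumped = bitmap_weight(engines_bumped,
					       I915_NUM_ENGINES);

	/* Preemption is disabled (no preempt hook for this engine) */
	if (!engine->preempt)
		return false;

	/* Only requests above the priority threshold may preempt */
	if (prio < EXECLISTS_PREEMPT_THRESHOLD)
		return false;

	/* Ignore requests whose dependencies span several engines */
	if (num_engines_bumped > 1)
		return false;

	/* A single dependency engine must be this engine */
	if (num_engines_bumped == 1 && !test_bit(engine->id, engines_bumped))
		return false;

	return true;
}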

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c |  3 ++
 drivers/gpu/drm/i915/intel_lrc.c           | 81 +++++++++++++++++++++++++++++-
 2 files changed, 83 insertions(+), 1 deletion(-)

Comments

Chris Wilson Feb. 23, 2017, 9:37 p.m. UTC | #1
On Thu, Feb 23, 2017 at 08:08:26PM +0100, Michał Winiarski wrote:
> +static void __execlists_try_preempt(struct intel_engine_cs *engine,
> +				  int prio)
> +{
> +	struct drm_i915_gem_request *rq;
> +	int highest_prio = INT_MIN;
> +	int ret;
> +
> +	spin_lock_irq(&engine->timeline->lock);
> +
> +	/* Engine is idle */
> +	if (execlists_elsp_idle(engine))
> +		goto out_unlock;
> +
> +	if (engine->preempt_requested)
> +		goto out_unlock;
> +
> +	list_for_each_entry_reverse(rq, &engine->timeline->requests, link) {
> +		if (i915_gem_request_completed(rq))
> +			break;
> +
> +		highest_prio = max(highest_prio,
> +				   rq->priotree.priority);
> +	}
> +
> +	/* Bail out unless our priority is higher than that of every
> +	 * in-flight request (or if there are no requests in flight) */
> +	if (highest_prio == INT_MIN || prio <= highest_prio)
> +		goto out_unlock;
> +
> +	engine->preempt_requested = true;

Here you are meant to unwind the already submitted requests and put them
back onto their rq->timelines.
-Chris
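
For reference, a minimal sketch of the unwinding Chris describes,
assuming the __i915_gem_request_unsubmit() helper present in the driver
at this time and the era's insert_request() requeue helper; the function
name and the exact requeue call are illustrative, not part of this
series:

/* Illustrative only -- not part of the patch under review.
 * Walk the engine timeline backwards, take every request the HW has
 * not yet completed off the execution timeline, and put it back on its
 * rq->timeline so the scheduler resubmits it once preemption completes.
 */
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq, *rn;

	lockdep_assert_held(&engine->timeline->lock);

	list_for_each_entry_safe_reverse(rq, rn,
					 &engine->timeline->requests, link) {
		if (i915_gem_request_completed(rq))
			break;

		/* Reverse __i915_gem_request_submit(): move the request
		 * from the engine timeline back onto its rq->timeline.
		 */
		__i915_gem_request_unsubmit(rq);

		/* Assumed requeue path: back into the execlists queue so
		 * the dequeue logic picks it up again (updating
		 * engine->execlist_first is elided here).
		 */
		insert_request(&rq->priotree, &engine->execlist_queue);
	}
}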

Patch

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index feccd65..6d9431d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -491,6 +491,9 @@  static int guc_ring_doorbell(struct i915_guc_client *client)
 
 static int i915_guc_preempt_noop(struct intel_engine_cs *engine)
 {
+	engine->preempt_requested = false;
+	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4cfaa1..869b96e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -674,16 +674,93 @@  pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	return engine;
 }
 
+#define EXECLISTS_PREEMPT_THRESHOLD 512
+
+static void __execlists_try_preempt(struct intel_engine_cs *engine,
+				  int prio)
+{
+	struct drm_i915_gem_request *rq;
+	int highest_prio = INT_MIN;
+	int ret;
+
+	spin_lock_irq(&engine->timeline->lock);
+
+	/* Engine is idle */
+	if (execlists_elsp_idle(engine))
+		goto out_unlock;
+
+	if (engine->preempt_requested)
+		goto out_unlock;
+
+	list_for_each_entry_reverse(rq, &engine->timeline->requests, link) {
+		if (i915_gem_request_completed(rq))
+			break;
+
+		highest_prio = max(highest_prio,
+				   rq->priotree.priority);
+	}
+
+	/* Bail out unless our priority is higher than that of every
+	 * in-flight request (or if there are no requests in flight) */
+	if (highest_prio == INT_MIN || prio <= highest_prio)
+		goto out_unlock;
+
+	engine->preempt_requested = true;
+
+	spin_unlock_irq(&engine->timeline->lock);
+
+	ret = engine->preempt(engine);
+	if (ret) {
+		spin_lock_irq(&engine->timeline->lock);
+		engine->preempt_requested = false;
+		spin_unlock_irq(&engine->timeline->lock);
+	}
+
+	return;
+
+out_unlock:
+	spin_unlock_irq(&engine->timeline->lock);
+}
+
+static void execlists_try_preempt(struct intel_engine_cs *engine,
+				     int prio,
+				     unsigned long *engines_bumped)
+{
+	int num_engines_bumped = bitmap_weight(engines_bumped,
+					       I915_NUM_ENGINES);
+
+	/* Preemption is disabled */
+	if (!engine->preempt)
+		return;
+
+	/* We're not a high priority request */
+	if (prio < EXECLISTS_PREEMPT_THRESHOLD)
+		return;
+
+	/* We have dependencies on many engines */
+	if (num_engines_bumped > 1)
+		return;
+
+	/* We have dependency on a single engine - but it's not our engine */
+	if (num_engines_bumped == 1 && !test_bit(engine->id, engines_bumped))
+		return;
+
+	__execlists_try_preempt(engine, prio);
+}
+
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
 	struct intel_engine_cs *engine = NULL;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	LIST_HEAD(dfs);
+	DECLARE_BITMAP(engine_bumped, I915_NUM_ENGINES);
 
 	if (prio <= READ_ONCE(request->priotree.priority))
 		return;
 
+	bitmap_zero(engine_bumped, I915_NUM_ENGINES);
+
 	/* Need BKL in order to use the temporary link inside i915_dependency */
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 
@@ -719,6 +796,7 @@  static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 			continue;
 
 		engine = pt_lock_engine(pt, engine);
+		__set_bit(engine->id, engine_bumped);
 
 		/* If it is not already in the rbtree, we can update the
 		 * priority inplace and skip over it (and its dependencies)
@@ -737,6 +815,7 @@  static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 		INIT_LIST_HEAD(&dep->dfs_link);
 
 		engine = pt_lock_engine(pt, engine);
+		__set_bit(engine->id, engine_bumped);
 
 		if (prio <= pt->priority)
 			continue;
@@ -752,7 +831,7 @@  static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	if (engine)
 		spin_unlock_irq(&engine->timeline->lock);
 
-	/* XXX Do we need to preempt to make room for us and our deps? */
+	execlists_try_preempt(engine, prio, engine_bumped);
 }
 
 static int execlists_context_pin(struct intel_engine_cs *engine,